/*
 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2010 by Hewlett-Packard Development Company.
 * All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/pthread_support.h"

/* This probably needs more porting work to ppc64. */

#if defined(GC_DARWIN_THREADS)

#include <sys/sysctl.h>
#include <mach/machine.h>
#include <CoreFoundation/CoreFoundation.h>

/* From "Inside Mac OS X - Mach-O Runtime Architecture" published by Apple
   Page 49:
   "The space beneath the stack pointer, where a new stack frame would normally
   be allocated, is called the red zone. This area as shown in Figure 3-2 may
   be used for any purpose as long as a new stack frame does not need to be
   added to the stack."

   Page 50: "If a leaf procedure's red zone usage would exceed 224 bytes, then
   it must set up a stack frame just like routines that call other routines."
*/
#ifdef POWERPC
# if CPP_WORDSZ == 32
#   define PPC_RED_ZONE_SIZE 224
# elif CPP_WORDSZ == 64
#   define PPC_RED_ZONE_SIZE 320
# endif
#endif
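
/* Note: the red zone lies below the stack pointer, so the collector must   */
/* start scanning a stopped PowerPC thread's stack PPC_RED_ZONE_SIZE bytes  */
/* below the saved r1 value; see the POWERPC case in GC_stack_range_for.    */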

#ifndef DARWIN_DONT_PARSE_STACK

typedef struct StackFrame {
  unsigned long savedSP;
  unsigned long savedCR;
  unsigned long savedLR;
  unsigned long reserved[2];
  unsigned long savedRTOC;
} StackFrame;

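/* Walk the chain of saved stack pointers starting at stack_start (or, if  */
/* stack_start is 0, at the current stack/frame pointer register) and      */
/* return the outermost frame reached.  Only the savedSP and savedLR       */
/* fields of StackFrame are examined; the walk stops at a zero savedSP or  */
/* at a frame whose saved LR looks bogus.  The result is used by           */
/* GC_stack_range_for below as the upper bound of the region to scan.      */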
GC_INNER ptr_t GC_FindTopOfStack(unsigned long stack_start)
{
  StackFrame *frame = (StackFrame *)stack_start;

  if (stack_start == 0) {
#   ifdef POWERPC
#     if CPP_WORDSZ == 32
        __asm__ __volatile__ ("lwz %0,0(r1)" : "=r" (frame));
#     else
        __asm__ __volatile__ ("ld %0,0(r1)" : "=r" (frame));
#     endif
#   elif defined(ARM32)
        volatile ptr_t sp_reg;
        __asm__ __volatile__ ("mov %0, r7\n" : "=r" (sp_reg));
        frame = (StackFrame *)sp_reg;
#   elif defined(AARCH64)
        volatile ptr_t sp_reg;
        __asm__ __volatile__ ("mov %0, x29\n" : "=r" (sp_reg));
        frame = (StackFrame *)sp_reg;
#   else
      ABORT("GC_FindTopOfStack(0) is not implemented");
#   endif
  }

# ifdef DEBUG_THREADS_EXTRA
    GC_log_printf("FindTopOfStack start at sp = %p\n", (void *)frame);
# endif
  while (frame->savedSP != 0) {
    /* if there are no more stack frames, stop */

    frame = (StackFrame*)frame->savedSP;

    /* We do these next two checks after going to the next frame
       because the LR of the first stack frame in the loop is
       deliberately not set up, so we should not check it. */
    if ((frame->savedLR & ~0x3) == 0 || (frame->savedLR & ~0x3) == ~0x3UL)
      break; /* if the next LR is bogus, stop */
  }
# ifdef DEBUG_THREADS_EXTRA
    GC_log_printf("FindTopOfStack finish at sp = %p\n", (void *)frame);
# endif
  return (ptr_t)frame;
}

#endif /* !DARWIN_DONT_PARSE_STACK */

/* GC_query_task_threads controls whether to obtain the list of */
/* the threads from the kernel or to use the GC_threads table.  */
#ifdef GC_NO_THREADS_DISCOVERY
# define GC_query_task_threads FALSE
#elif defined(GC_DISCOVER_TASK_THREADS)
# define GC_query_task_threads TRUE
#else
  STATIC GC_bool GC_query_task_threads = FALSE;
#endif /* !GC_NO_THREADS_DISCOVERY */

/* Use implicit thread registration (all task threads excluding the GC  */
/* special ones are stopped and scanned).  Should be called before      */
/* GC_INIT() (or, at least, before going multi-threaded).  Deprecated.  */
GC_API void GC_CALL GC_use_threads_discovery(void)
{
# if defined(GC_NO_THREADS_DISCOVERY) || defined(DARWIN_DONT_PARSE_STACK)
    ABORT("Darwin task-threads-based stop and push unsupported");
# else
#   ifndef GC_ALWAYS_MULTITHREADED
      GC_ASSERT(!GC_need_to_lock);
#   endif
#   ifndef GC_DISCOVER_TASK_THREADS
      GC_query_task_threads = TRUE;
#   endif
    GC_init_parallel(); /* just to be consistent with the Win32 one */
# endif
}
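
/* A minimal client-side usage sketch (not part of this file), relying  */
/* only on the public gc.h API: call GC_use_threads_discovery() before  */
/* GC_INIT(), i.e.                                                      */
/*      GC_use_threads_discovery();                                     */
/*      GC_INIT();                                                      */
/* after which the collector obtains the list of threads to stop and    */
/* scan from the Mach kernel (see GC_push_all_stacks below).            */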

#ifndef kCFCoreFoundationVersionNumber_iOS_8_0
# define kCFCoreFoundationVersionNumber_iOS_8_0 1140.1
#endif
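
/* The definition above is a fallback for SDKs whose CoreFoundation    */
/* headers predate iOS 8; the numeric value corresponds to the         */
/* CoreFoundation version shipped with iOS 8.                          */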

/* Evaluates the stack range for a given thread.  Returns the lower     */
/* bound and sets *phi to the upper one.                                */
STATIC ptr_t GC_stack_range_for(ptr_t *phi, thread_act_t thread, GC_thread p,
                                GC_bool thread_blocked, mach_port_t my_thread,
                                ptr_t *paltstack_lo,
                                ptr_t *paltstack_hi GC_ATTR_UNUSED)
{
  ptr_t lo;
  if (thread == my_thread) {
    GC_ASSERT(!thread_blocked);
    lo = GC_approx_sp();
#   ifndef DARWIN_DONT_PARSE_STACK
      *phi = GC_FindTopOfStack(0);
#   endif

  } else if (thread_blocked) {
#   if defined(CPPCHECK)
      if (NULL == p) ABORT("Invalid GC_thread passed to GC_stack_range_for");
#   endif
    lo = p->stop_info.stack_ptr;
#   ifndef DARWIN_DONT_PARSE_STACK
      *phi = p->topOfStack;
#   endif

  } else {
    /* MACHINE_THREAD_STATE_COUNT does not seem to be defined        */
    /* everywhere.  Hence we use our own version.  Alternatively,    */
    /* we could use THREAD_STATE_MAX (but that seems suboptimal).    */
    kern_return_t kern_result;
    GC_THREAD_STATE_T state;

#   if defined(ARM32) && defined(ARM_THREAD_STATE32)
      /* Use ARM_UNIFIED_THREAD_STATE on iOS8+ 32-bit targets and on    */
      /* 64-bit H/W (iOS7+ 32-bit mode).                                */
      size_t size = sizeof(cpu_type_t); /* required by sysctlbyname() */
      static cpu_type_t cputype = 0;

      if (cputype == 0) {
        sysctlbyname("hw.cputype", &cputype, &size, NULL, 0);
      }
      if (cputype == CPU_TYPE_ARM64
          || kCFCoreFoundationVersionNumber
             >= kCFCoreFoundationVersionNumber_iOS_8_0) {
        arm_unified_thread_state_t unified_state;
        mach_msg_type_number_t unified_thread_state_count
                                        = ARM_UNIFIED_THREAD_STATE_COUNT;
#       if defined(CPPCHECK)
#         define GC_ARM_UNIFIED_THREAD_STATE 1
#       else
#         define GC_ARM_UNIFIED_THREAD_STATE ARM_UNIFIED_THREAD_STATE
#       endif
        kern_result = thread_get_state(thread, GC_ARM_UNIFIED_THREAD_STATE,
                                       (natural_t *)&unified_state,
                                       &unified_thread_state_count);
#       if !defined(CPPCHECK)
          if (unified_state.ash.flavor != ARM_THREAD_STATE32) {
            ABORT("unified_state flavor should be ARM_THREAD_STATE32");
          }
#       endif
        state = unified_state;
      } else
#   endif
    /* else */ {
      mach_msg_type_number_t thread_state_count = GC_MACH_THREAD_STATE_COUNT;

      /* Get the thread state (registers, etc.) */
      do {
        kern_result = thread_get_state(thread, GC_MACH_THREAD_STATE,
                                       (natural_t *)&state,
                                       &thread_state_count);
      } while (kern_result == KERN_ABORTED);
    }
#   ifdef DEBUG_THREADS
      GC_log_printf("thread_get_state returns value = %d\n", kern_result);
#   endif
    if (kern_result != KERN_SUCCESS)
      ABORT("thread_get_state failed");

#   if defined(I386)
      lo = (ptr_t)state.THREAD_FLD(esp);
#     ifndef DARWIN_DONT_PARSE_STACK
        *phi = GC_FindTopOfStack(state.THREAD_FLD(esp));
#     endif
      GC_push_one(state.THREAD_FLD(eax));
      GC_push_one(state.THREAD_FLD(ebx));
      GC_push_one(state.THREAD_FLD(ecx));
      GC_push_one(state.THREAD_FLD(edx));
      GC_push_one(state.THREAD_FLD(edi));
      GC_push_one(state.THREAD_FLD(esi));
      GC_push_one(state.THREAD_FLD(ebp));

#   elif defined(X86_64)
      lo = (ptr_t)state.THREAD_FLD(rsp);
#     ifndef DARWIN_DONT_PARSE_STACK
        *phi = GC_FindTopOfStack(state.THREAD_FLD(rsp));
#     endif
      GC_push_one(state.THREAD_FLD(rax));
      GC_push_one(state.THREAD_FLD(rbx));
      GC_push_one(state.THREAD_FLD(rcx));
      GC_push_one(state.THREAD_FLD(rdx));
      GC_push_one(state.THREAD_FLD(rdi));
      GC_push_one(state.THREAD_FLD(rsi));
      GC_push_one(state.THREAD_FLD(rbp));
      /* GC_push_one(state.THREAD_FLD(rsp)); */
      GC_push_one(state.THREAD_FLD(r8));
      GC_push_one(state.THREAD_FLD(r9));
      GC_push_one(state.THREAD_FLD(r10));
      GC_push_one(state.THREAD_FLD(r11));
      GC_push_one(state.THREAD_FLD(r12));
      GC_push_one(state.THREAD_FLD(r13));
      GC_push_one(state.THREAD_FLD(r14));
      GC_push_one(state.THREAD_FLD(r15));

#   elif defined(POWERPC)
      lo = (ptr_t)(state.THREAD_FLD(r1) - PPC_RED_ZONE_SIZE);
#     ifndef DARWIN_DONT_PARSE_STACK
        *phi = GC_FindTopOfStack(state.THREAD_FLD(r1));
#     endif
      GC_push_one(state.THREAD_FLD(r0));
      GC_push_one(state.THREAD_FLD(r2));
      GC_push_one(state.THREAD_FLD(r3));
      GC_push_one(state.THREAD_FLD(r4));
      GC_push_one(state.THREAD_FLD(r5));
      GC_push_one(state.THREAD_FLD(r6));
      GC_push_one(state.THREAD_FLD(r7));
      GC_push_one(state.THREAD_FLD(r8));
      GC_push_one(state.THREAD_FLD(r9));
      GC_push_one(state.THREAD_FLD(r10));
      GC_push_one(state.THREAD_FLD(r11));
      GC_push_one(state.THREAD_FLD(r12));
      GC_push_one(state.THREAD_FLD(r13));
      GC_push_one(state.THREAD_FLD(r14));
      GC_push_one(state.THREAD_FLD(r15));
      GC_push_one(state.THREAD_FLD(r16));
      GC_push_one(state.THREAD_FLD(r17));
      GC_push_one(state.THREAD_FLD(r18));
      GC_push_one(state.THREAD_FLD(r19));
      GC_push_one(state.THREAD_FLD(r20));
      GC_push_one(state.THREAD_FLD(r21));
      GC_push_one(state.THREAD_FLD(r22));
      GC_push_one(state.THREAD_FLD(r23));
      GC_push_one(state.THREAD_FLD(r24));
      GC_push_one(state.THREAD_FLD(r25));
      GC_push_one(state.THREAD_FLD(r26));
      GC_push_one(state.THREAD_FLD(r27));
      GC_push_one(state.THREAD_FLD(r28));
      GC_push_one(state.THREAD_FLD(r29));
      GC_push_one(state.THREAD_FLD(r30));
      GC_push_one(state.THREAD_FLD(r31));

#   elif defined(ARM32)
      lo = (ptr_t)state.THREAD_FLD(sp);
#     ifndef DARWIN_DONT_PARSE_STACK
        *phi = GC_FindTopOfStack(state.THREAD_FLD(r[7])); /* fp */
#     endif
      {
        int j;
        for (j = 0; j < 7; j++)
          GC_push_one(state.THREAD_FLD(r[j]));
        j++; /* "r7" is skipped (iOS uses it as a frame pointer) */
        for (; j <= 12; j++)
          GC_push_one(state.THREAD_FLD(r[j]));
      }
      /* "cpsr", "pc" and "sp" are skipped */
      GC_push_one(state.THREAD_FLD(lr));

#   elif defined(AARCH64)
      lo = (ptr_t)state.THREAD_FLD(sp);
#     ifndef DARWIN_DONT_PARSE_STACK
        *phi = GC_FindTopOfStack(state.THREAD_FLD(fp));
#     endif
      {
        int j;
        for (j = 0; j <= 28; j++) {
          GC_push_one(state.THREAD_FLD(x[j]));
        }
      }
      /* "cpsr", "fp", "pc" and "sp" are skipped */
      GC_push_one(state.THREAD_FLD(lr));

#   elif defined(CPPCHECK)
      lo = NULL;
#   else
#     error FIXME for non-arm/ppc/x86 architectures
#   endif
  } /* thread != my_thread */

# ifdef DARWIN_DONT_PARSE_STACK
    /* p is guaranteed to be non-NULL regardless of GC_query_task_threads. */
    *phi = (p->flags & MAIN_THREAD) != 0 ? GC_stackbottom : p->stack_end;
# endif

  /* TODO: Determine p and handle altstack if !DARWIN_DONT_PARSE_STACK */
# ifdef DARWIN_DONT_PARSE_STACK
  if (p->altstack != NULL && (word)p->altstack <= (word)lo
      && (word)lo <= (word)p->altstack + p->altstack_size) {
    *paltstack_lo = lo;
    *paltstack_hi = p->altstack + p->altstack_size;
    lo = p->stack;
    *phi = p->stack + p->stack_size;
  } else
# endif
  /* else */ {
    *paltstack_lo = NULL;
  }
# ifdef DEBUG_THREADS
    GC_log_printf("Darwin: Stack for thread %p = [%p,%p)\n",
                  (void *)(word)thread, (void *)lo, (void *)(*phi));
# endif
  return lo;
}

GC_INNER void GC_push_all_stacks(void)
{
  ptr_t hi, altstack_lo, altstack_hi;
  task_t my_task = current_task();
  mach_port_t my_thread = mach_thread_self();
  GC_bool found_me = FALSE;
  int nthreads = 0;
  word total_size = 0;
  mach_msg_type_number_t listcount = (mach_msg_type_number_t)THREAD_TABLE_SZ;
  if (!EXPECT(GC_thr_initialized, TRUE))
    GC_thr_init();

# ifndef DARWIN_DONT_PARSE_STACK
    if (GC_query_task_threads) {
      int i;
      kern_return_t kern_result;
      thread_act_array_t act_list = 0;

      /* Obtain the list of the threads from the kernel.  */
      kern_result = task_threads(my_task, &act_list, &listcount);
      if (kern_result != KERN_SUCCESS)
        ABORT("task_threads failed");

      for (i = 0; i < (int)listcount; i++) {
        thread_act_t thread = act_list[i];
        ptr_t lo = GC_stack_range_for(&hi, thread, NULL, FALSE, my_thread,
                                      &altstack_lo, &altstack_hi);

        if (lo) {
          GC_ASSERT((word)lo <= (word)hi);
          total_size += hi - lo;
          GC_push_all_stack(lo, hi);
        }
        /* TODO: Handle altstack */
        nthreads++;
        if (thread == my_thread)
          found_me = TRUE;
        mach_port_deallocate(my_task, thread);
      } /* for (i=0; ...) */

      vm_deallocate(my_task, (vm_address_t)act_list,
                    sizeof(thread_t) * listcount);
    } else
# endif /* !DARWIN_DONT_PARSE_STACK */
  /* else */ {
    int i;

    for (i = 0; i < (int)listcount; i++) {
      GC_thread p;

      for (p = GC_threads[i]; p != NULL; p = p->next)
        if ((p->flags & FINISHED) == 0) {
          thread_act_t thread = (thread_act_t)p->stop_info.mach_thread;
          ptr_t lo = GC_stack_range_for(&hi, thread, p,
                                        (GC_bool)p->thread_blocked,
                                        my_thread, &altstack_lo,
                                        &altstack_hi);

          if (lo) {
            GC_ASSERT((word)lo <= (word)hi);
            total_size += hi - lo;
            GC_push_all_stack_sections(lo, hi, p->traced_stack_sect);
          }
          if (altstack_lo) {
            total_size += altstack_hi - altstack_lo;
            GC_push_all_stack(altstack_lo, altstack_hi);
          }
          nthreads++;
          if (thread == my_thread)
            found_me = TRUE;
        }
    } /* for (i=0; ...) */
  }

  mach_port_deallocate(my_task, my_thread);
  GC_VERBOSE_LOG_PRINTF("Pushed %d thread stacks\n", nthreads);
  if (!found_me && !GC_in_thread_creation)
    ABORT("Collecting from unknown thread");
  GC_total_stacksize = total_size;
}

#ifndef GC_NO_THREADS_DISCOVERY

# ifdef MPROTECT_VDB
    STATIC mach_port_t GC_mach_handler_thread = 0;
    STATIC GC_bool GC_use_mach_handler_thread = FALSE;

    GC_INNER void GC_darwin_register_mach_handler_thread(mach_port_t thread)
    {
      GC_mach_handler_thread = thread;
      GC_use_mach_handler_thread = TRUE;
    }
# endif /* MPROTECT_VDB */

# ifndef GC_MAX_MACH_THREADS
#   define GC_MAX_MACH_THREADS THREAD_TABLE_SZ
# endif

  struct GC_mach_thread {
    thread_act_t thread;
    GC_bool suspended;
  };

  struct GC_mach_thread GC_mach_threads[GC_MAX_MACH_THREADS];
  STATIC int GC_mach_threads_count = 0;
  /* FIXME: it is better to implement GC_mach_threads as a hash set.  */
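  /* GC_mach_threads records, for the threads-discovery mode, which     */
  /* threads GC_stop_world actually suspended, so that GC_start_world   */
  /* resumes only the threads that were suspended here.                 */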

/* Returns TRUE if there is a thread in act_list that was not in old_list. */
STATIC GC_bool GC_suspend_thread_list(thread_act_array_t act_list, int count,
                                      thread_act_array_t old_list,
                                      int old_count, task_t my_task,
                                      mach_port_t my_thread)
{
  int i;
  int j = -1;
  GC_bool changed = FALSE;

  for (i = 0; i < count; i++) {
    thread_act_t thread = act_list[i];
    GC_bool found;
    kern_return_t kern_result;

    if (thread == my_thread
#       ifdef MPROTECT_VDB
          || (GC_mach_handler_thread == thread && GC_use_mach_handler_thread)
#       endif
#       ifdef PARALLEL_MARK
          || GC_is_mach_marker(thread) /* ignore the parallel markers */
#       endif
        ) {
      /* Do not add our own thread, the parallel markers or the handler */
      /* thread; consider them as found (e.g., processed earlier).      */
      mach_port_deallocate(my_task, thread);
      continue;
    }

    /* Find the current thread in the old list.     */
    found = FALSE;
    {
      int last_found = j; /* remember the previously found thread index */

      /* Search for the thread starting from the last found one first.  */
      while (++j < old_count)
        if (old_list[j] == thread) {
          found = TRUE;
          break;
        }
      if (!found) {
        /* If not found, search in the rest (beginning) of the list.    */
        for (j = 0; j < last_found; j++)
          if (old_list[j] == thread) {
            found = TRUE;
            break;
          }
      }
    }

    if (found) {
      /* It is already in the list, skip processing, release mach port. */
      mach_port_deallocate(my_task, thread);
      continue;
    }

    /* Add it to the GC_mach_threads list.   */
    if (GC_mach_threads_count == GC_MAX_MACH_THREADS)
      ABORT("Too many threads");
    GC_mach_threads[GC_mach_threads_count].thread = thread;
    /* default is not suspended */
    GC_mach_threads[GC_mach_threads_count].suspended = FALSE;
    changed = TRUE;

#   ifdef DEBUG_THREADS
      GC_log_printf("Suspending %p\n", (void *)(word)thread);
#   endif
    /* Unconditionally suspend the thread.  It will do no     */
    /* harm if it is already suspended by the client logic.   */
    GC_acquire_dirty_lock();
    do {
      kern_result = thread_suspend(thread);
    } while (kern_result == KERN_ABORTED);
    GC_release_dirty_lock();
    if (kern_result != KERN_SUCCESS) {
      /* The thread may have quit since the task_threads() call.  Mark   */
      /* it as not suspended so that it is not dealt with later.         */
      GC_mach_threads[GC_mach_threads_count].suspended = FALSE;
    } else {
      /* Mark the thread as suspended; it will need to be resumed later. */
      GC_mach_threads[GC_mach_threads_count].suspended = TRUE;
      if (GC_on_thread_event)
        GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED, (void *)(word)thread);
    }
    GC_mach_threads_count++;
  }
  return changed;
}

#endif /* !GC_NO_THREADS_DISCOVERY */

/* Caller holds allocation lock.        */
GC_INNER void GC_stop_world(void)
{
  task_t my_task = current_task();
  mach_port_t my_thread = mach_thread_self();
  kern_return_t kern_result;

# ifdef DEBUG_THREADS
    GC_log_printf("Stopping the world from thread %p\n",
                  (void *)(word)my_thread);
# endif
# ifdef PARALLEL_MARK
    if (GC_parallel) {
      /* Make sure all free list construction has stopped before we     */
      /* start.  No new construction can start, since free list         */
      /* construction is required to acquire and release the GC lock    */
      /* before it starts, and we have the lock.                        */
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero. */
    }
# endif /* PARALLEL_MARK */

  if (GC_query_task_threads) {
#   ifndef GC_NO_THREADS_DISCOVERY
      GC_bool changed;
      thread_act_array_t act_list, prev_list;
      mach_msg_type_number_t listcount, prevcount;

      /* Clear out the mach threads list table.  We do not need to      */
      /* really clear GC_mach_threads[] as it is used only in the range */
      /* from 0 to GC_mach_threads_count-1, inclusive.                  */
      GC_mach_threads_count = 0;

      /* Loop stopping threads until you have gone over the whole list  */
      /* twice without a new one appearing.  thread_create() will not   */
      /* return (and thus the creating thread cannot be stopped) until  */
      /* the new thread exists, so there is no window in which you      */
      /* could stop a thread, recognize that it is stopped, but then    */
      /* have a thread it created before stopping show up later.        */
      changed = TRUE;
      prev_list = NULL;
      prevcount = 0;
      do {
        kern_result = task_threads(my_task, &act_list, &listcount);

        if (kern_result == KERN_SUCCESS) {
          changed = GC_suspend_thread_list(act_list, listcount, prev_list,
                                           prevcount, my_task, my_thread);

          if (prev_list != NULL) {
            /* The thread ports are not deallocated with the list:       */
            /* unused ports are deallocated in GC_suspend_thread_list,   */
            /* while the used ones are kept in GC_mach_threads until     */
            /* GC_start_world, since otherwise the thread object could   */
            /* change and GC_start_world would not find the thread to    */
            /* resume, causing the application to hang.                  */
            vm_deallocate(my_task, (vm_address_t)prev_list,
                          sizeof(thread_t) * prevcount);
          }

          /* Repeat while having changes. */
          prev_list = act_list;
          prevcount = listcount;
        }
      } while (changed);

      GC_ASSERT(prev_list != 0);
      /* The thread ports are not deallocated with the list, see above. */
      vm_deallocate(my_task, (vm_address_t)act_list,
                    sizeof(thread_t) * listcount);
#   endif /* !GC_NO_THREADS_DISCOVERY */

  } else {
    unsigned i;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      GC_thread p;

      for (p = GC_threads[i]; p != NULL; p = p->next) {
        if ((p->flags & FINISHED) == 0 && !p->thread_blocked &&
             p->stop_info.mach_thread != my_thread) {
          GC_acquire_dirty_lock();
          do {
            kern_result = thread_suspend(p->stop_info.mach_thread);
          } while (kern_result == KERN_ABORTED);
          GC_release_dirty_lock();
          if (kern_result != KERN_SUCCESS)
            ABORT("thread_suspend failed");
          if (GC_on_thread_event)
            GC_on_thread_event(GC_EVENT_THREAD_SUSPENDED,
                               (void *)(word)p->stop_info.mach_thread);
        }
      }
    }
  }

# ifdef MPROTECT_VDB
    if (GC_auto_incremental) {
      GC_mprotect_stop();
    }
# endif
# ifdef PARALLEL_MARK
    if (GC_parallel)
      GC_release_mark_lock();
# endif

# ifdef DEBUG_THREADS
    GC_log_printf("World stopped from %p\n", (void *)(word)my_thread);
# endif
  mach_port_deallocate(my_task, my_thread);
}

GC_INLINE void GC_thread_resume(thread_act_t thread)
{
  kern_return_t kern_result;
# if defined(DEBUG_THREADS) || defined(GC_ASSERTIONS)
    struct thread_basic_info info;
    mach_msg_type_number_t outCount = THREAD_BASIC_INFO_COUNT;

    kern_result = thread_info(thread, THREAD_BASIC_INFO,
                              (thread_info_t)&info, &outCount);
    if (kern_result != KERN_SUCCESS)
      ABORT("thread_info failed");
# endif
# ifdef DEBUG_THREADS
    GC_log_printf("Resuming thread %p with state %d\n", (void *)(word)thread,
                  info.run_state);
# endif
  /* Resume the thread */
  kern_result = thread_resume(thread);
  if (kern_result != KERN_SUCCESS) {
    WARN("thread_resume(%p) failed: mach port invalid\n", thread);
  } else if (GC_on_thread_event) {
    GC_on_thread_event(GC_EVENT_THREAD_UNSUSPENDED, (void *)(word)thread);
  }
}
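
/* Note: Mach suspend/resume requests are counted, so the resume above  */
/* undoes only the suspension performed by GC_stop_world; a thread that */
/* was additionally suspended by client code remains suspended.         */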

/* Caller holds allocation lock, and has held it continuously since    */
/* the world stopped.                                                   */
GC_INNER void GC_start_world(void)
{
  task_t my_task = current_task();
# ifdef DEBUG_THREADS
    GC_log_printf("World starting\n");
# endif
# ifdef MPROTECT_VDB
    if (GC_auto_incremental) {
      GC_mprotect_resume();
    }
# endif

  if (GC_query_task_threads) {
#   ifndef GC_NO_THREADS_DISCOVERY
      int i, j;
      kern_return_t kern_result;
      thread_act_array_t act_list;
      mach_msg_type_number_t listcount;

      kern_result = task_threads(my_task, &act_list, &listcount);
      if (kern_result != KERN_SUCCESS)
        ABORT("task_threads failed");

      j = (int)listcount;
      for (i = 0; i < GC_mach_threads_count; i++) {
        thread_act_t thread = GC_mach_threads[i].thread;

        if (GC_mach_threads[i].suspended) {
          int last_found = j;   /* The thread index found during the    */
                                /* previous iteration (the count value  */
                                /* means no thread found yet).          */

          /* Search for the thread starting from the last found one first. */
          while (++j < (int)listcount) {
            if (act_list[j] == thread)
              break;
          }
          if (j >= (int)listcount) {
            /* If not found, search in the rest (beginning) of the list. */
            for (j = 0; j < last_found; j++) {
              if (act_list[j] == thread)
                break;
            }
          }
          if (j != last_found) {
            /* The thread is alive, resume it.  */
            GC_thread_resume(thread);
          }
        } else {
          /* This thread failed to be suspended by GC_stop_world;       */
          /* no action is needed.                                       */
#         ifdef DEBUG_THREADS
            GC_log_printf("Not resuming thread %p as it is not suspended\n",
                          (void *)(word)thread);
#         endif
        }
        mach_port_deallocate(my_task, thread);
      }

      for (i = 0; i < (int)listcount; i++)
        mach_port_deallocate(my_task, act_list[i]);
      vm_deallocate(my_task, (vm_address_t)act_list,
                    sizeof(thread_t) * listcount);
#   endif /* !GC_NO_THREADS_DISCOVERY */

  } else {
    int i;
    mach_port_t my_thread = mach_thread_self();

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      GC_thread p;
      for (p = GC_threads[i]; p != NULL; p = p->next) {
        if ((p->flags & FINISHED) == 0 && !p->thread_blocked &&
             p->stop_info.mach_thread != my_thread)
          GC_thread_resume(p->stop_info.mach_thread);
      }
    }

    mach_port_deallocate(my_task, my_thread);
  }

# ifdef DEBUG_THREADS
    GC_log_printf("World started\n");
# endif
}

#endif /* GC_DARWIN_THREADS */