#include "private/pthread_support.h"

/* This probably needs more porting work to ppc64. */

# if defined(GC_DARWIN_THREADS)

/* From "Inside Mac OS X - Mach-O Runtime Architecture" published by Apple
   Page 49:
   "The space beneath the stack pointer, where a new stack frame would normally
   be allocated, is called the red zone. This area as shown in Figure 3-2 may
   be used for any purpose as long as a new stack frame does not need to be
   added to the stack."

   Page 50: "If a leaf procedure's red zone usage would exceed 224 bytes, then
   it must set up a stack frame just like routines that call other routines."
*/
#if defined(__ppc__)
# define PPC_RED_ZONE_SIZE 224
#elif defined(__ppc64__)
# define PPC_RED_ZONE_SIZE 320
#endif
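/* The red zone size is subtracted from a suspended thread's stack pointer */
/* (r1) below, so that values a leaf routine may have stored in the red    */
/* zone are scanned as well.                                                */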

typedef struct StackFrame {
  unsigned long savedSP;
  unsigned long savedCR;
  unsigned long savedLR;
  unsigned long reserved[2];
  unsigned long savedRTOC;
} StackFrame;

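/* Walk the chain of saved stack pointers, starting at stack_start (or at  */
/* the current stack pointer if stack_start is 0), and return the address  */
/* of the last frame reached.  The walk stops when the saved SP is 0 or    */
/* the saved LR no longer looks like a valid code address.                 */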
unsigned long FindTopOfStack(unsigned long stack_start) {
  StackFrame *frame;

  if (stack_start == 0) {
# ifdef POWERPC
#   if CPP_WORDSZ == 32
      __asm__ volatile("lwz %0,0(r1)" : "=r" (frame));
#   else
      __asm__ volatile("ld %0,0(r1)" : "=r" (frame));
#   endif
# endif
  } else {
    frame = (StackFrame *)stack_start;
  }

# ifdef DEBUG_THREADS
    /* GC_printf1("FindTopOfStack start at sp = %p\n", frame); */
# endif
  do {
    if (frame->savedSP == 0) break;
                /* if there are no more stack frames, stop */

    frame = (StackFrame*)frame->savedSP;

    /* we do these next two checks after going to the next frame
       because the LR for the first stack frame in the loop
       is not set up on purpose, so we shouldn't check it. */
    if ((frame->savedLR & ~3) == 0) break; /* if the next LR is bogus, stop */
    if ((~(frame->savedLR) & ~3) == 0) break; /* ditto */
  } while (1);

# ifdef DEBUG_THREADS
    /* GC_printf1("FindTopOfStack finish at sp = %p\n", frame); */
# endif

  return (unsigned long)frame;
}

#ifdef DARWIN_DONT_PARSE_STACK
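/* Push the registers and stack of every thread.  This variant does not    */
/* parse stack frames: the scanned region runs from each thread's stack    */
/* pointer up to its recorded stack bottom (GC_stackbottom for the main    */
/* thread, stack_end otherwise).                                            */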
void GC_push_all_stacks() {
  int i;
  kern_return_t r;
  GC_thread p;
  pthread_t me;
  ptr_t lo, hi;
  GC_THREAD_STATE_T state;
  mach_msg_type_number_t thread_state_count = GC_MACH_THREAD_STATE_COUNT;

  me = pthread_self();
  if (!GC_thr_initialized) GC_thr_init();

  for(i=0;i<THREAD_TABLE_SZ;i++) {
    for(p=GC_threads[i];p!=0;p=p->next) {
      if(p -> flags & FINISHED) continue;
      if(pthread_equal(p->id,me)) {
        lo = GC_approx_sp();
      } else {
        /* Get the thread state (registers, etc) */
        r = thread_get_state(p->stop_info.mach_thread, GC_MACH_THREAD_STATE,
                             (natural_t*)&state, &thread_state_count);
        if(r != KERN_SUCCESS) ABORT("thread_get_state failed");

#if defined(I386)
        lo = (void*)state . THREAD_FLD (esp);

        GC_push_one(state . THREAD_FLD (eax));
        GC_push_one(state . THREAD_FLD (ebx));
        GC_push_one(state . THREAD_FLD (ecx));
        GC_push_one(state . THREAD_FLD (edx));
        GC_push_one(state . THREAD_FLD (edi));
        GC_push_one(state . THREAD_FLD (esi));
        GC_push_one(state . THREAD_FLD (ebp));

#elif defined(X86_64)
        lo = (void*)state . THREAD_FLD (rsp);

        GC_push_one(state . THREAD_FLD (rax));
        GC_push_one(state . THREAD_FLD (rbx));
        GC_push_one(state . THREAD_FLD (rcx));
        GC_push_one(state . THREAD_FLD (rdx));
        GC_push_one(state . THREAD_FLD (rdi));
        GC_push_one(state . THREAD_FLD (rsi));
        GC_push_one(state . THREAD_FLD (rbp));
        GC_push_one(state . THREAD_FLD (rsp));
        GC_push_one(state . THREAD_FLD (r8));
        GC_push_one(state . THREAD_FLD (r9));
        GC_push_one(state . THREAD_FLD (r10));
        GC_push_one(state . THREAD_FLD (r11));
        GC_push_one(state . THREAD_FLD (r12));
        GC_push_one(state . THREAD_FLD (r13));
        GC_push_one(state . THREAD_FLD (r14));
        GC_push_one(state . THREAD_FLD (r15));
        GC_push_one(state . THREAD_FLD (rip));
        GC_push_one(state . THREAD_FLD (rflags));
        GC_push_one(state . THREAD_FLD (cs));
        GC_push_one(state . THREAD_FLD (fs));
        GC_push_one(state . THREAD_FLD (gs));

#elif defined(POWERPC)
        lo = (void*)(state . THREAD_FLD (r1) - PPC_RED_ZONE_SIZE);

        GC_push_one(state . THREAD_FLD (r0));
        GC_push_one(state . THREAD_FLD (r2));
        GC_push_one(state . THREAD_FLD (r3));
        GC_push_one(state . THREAD_FLD (r4));
        GC_push_one(state . THREAD_FLD (r5));
        GC_push_one(state . THREAD_FLD (r6));
        GC_push_one(state . THREAD_FLD (r7));
        GC_push_one(state . THREAD_FLD (r8));
        GC_push_one(state . THREAD_FLD (r9));
        GC_push_one(state . THREAD_FLD (r10));
        GC_push_one(state . THREAD_FLD (r11));
        GC_push_one(state . THREAD_FLD (r12));
        GC_push_one(state . THREAD_FLD (r13));
        GC_push_one(state . THREAD_FLD (r14));
        GC_push_one(state . THREAD_FLD (r15));
        GC_push_one(state . THREAD_FLD (r16));
        GC_push_one(state . THREAD_FLD (r17));
        GC_push_one(state . THREAD_FLD (r18));
        GC_push_one(state . THREAD_FLD (r19));
        GC_push_one(state . THREAD_FLD (r20));
        GC_push_one(state . THREAD_FLD (r21));
        GC_push_one(state . THREAD_FLD (r22));
        GC_push_one(state . THREAD_FLD (r23));
        GC_push_one(state . THREAD_FLD (r24));
        GC_push_one(state . THREAD_FLD (r25));
        GC_push_one(state . THREAD_FLD (r26));
        GC_push_one(state . THREAD_FLD (r27));
        GC_push_one(state . THREAD_FLD (r28));
        GC_push_one(state . THREAD_FLD (r29));
        GC_push_one(state . THREAD_FLD (r30));
        GC_push_one(state . THREAD_FLD (r31));
#else
# error FIXME for non-x86 || ppc architectures
#endif
      } /* p != me */
      if(p->flags & MAIN_THREAD)
        hi = GC_stackbottom;
      else
        hi = p->stack_end;
#if DEBUG_THREADS
      GC_printf3("Darwin: Stack for thread 0x%lx = [%lx,%lx)\n",
                 (unsigned long) p -> id,
                 (unsigned long) lo,
                 (unsigned long) hi
                 );
#endif
      GC_push_all_stack(lo,hi);
    } /* for(p=GC_threads[i]...) */
  } /* for(i=0;i<THREAD_TABLE_SZ...) */
}

#else /* !DARWIN_DONT_PARSE_STACK; Use FindTopOfStack() */

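/* Push the registers and stack of every thread.  This variant walks each  */
/* thread's frame chain with FindTopOfStack() to determine the upper bound */
/* of the region to scan.                                                   */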
void GC_push_all_stacks() {
    int i;
    kern_return_t r;
    mach_port_t me;
    ptr_t lo, hi;
    thread_act_array_t act_list = 0;
    mach_msg_type_number_t listcount = 0;

    me = mach_thread_self();
    if (!GC_thr_initialized) GC_thr_init();

    r = task_threads(current_task(), &act_list, &listcount);
    if(r != KERN_SUCCESS) ABORT("task_threads failed");
    for(i = 0; i < listcount; i++) {
      thread_act_t thread = act_list[i];
      if (thread == me) {
        lo = GC_approx_sp();
        hi = (ptr_t)FindTopOfStack(0);
      } else {
#     if defined(__ppc__) || defined(__ppc64__)
        GC_THREAD_STATE_T info;
        mach_msg_type_number_t outCount = THREAD_STATE_MAX;
        r = thread_get_state(thread, GC_MACH_THREAD_STATE,
                             (natural_t *)&info, &outCount);
        if(r != KERN_SUCCESS) ABORT("thread_get_state failed");

        lo = (void*)(info . THREAD_FLD (r1) - PPC_RED_ZONE_SIZE);
        hi = (ptr_t)FindTopOfStack(info . THREAD_FLD (r1));

        GC_push_one(info . THREAD_FLD (r0));
        GC_push_one(info . THREAD_FLD (r2));
        GC_push_one(info . THREAD_FLD (r3));
        GC_push_one(info . THREAD_FLD (r4));
        GC_push_one(info . THREAD_FLD (r5));
        GC_push_one(info . THREAD_FLD (r6));
        GC_push_one(info . THREAD_FLD (r7));
        GC_push_one(info . THREAD_FLD (r8));
        GC_push_one(info . THREAD_FLD (r9));
        GC_push_one(info . THREAD_FLD (r10));
        GC_push_one(info . THREAD_FLD (r11));
        GC_push_one(info . THREAD_FLD (r12));
        GC_push_one(info . THREAD_FLD (r13));
        GC_push_one(info . THREAD_FLD (r14));
        GC_push_one(info . THREAD_FLD (r15));
        GC_push_one(info . THREAD_FLD (r16));
        GC_push_one(info . THREAD_FLD (r17));
        GC_push_one(info . THREAD_FLD (r18));
        GC_push_one(info . THREAD_FLD (r19));
        GC_push_one(info . THREAD_FLD (r20));
        GC_push_one(info . THREAD_FLD (r21));
        GC_push_one(info . THREAD_FLD (r22));
        GC_push_one(info . THREAD_FLD (r23));
        GC_push_one(info . THREAD_FLD (r24));
        GC_push_one(info . THREAD_FLD (r25));
        GC_push_one(info . THREAD_FLD (r26));
        GC_push_one(info . THREAD_FLD (r27));
        GC_push_one(info . THREAD_FLD (r28));
        GC_push_one(info . THREAD_FLD (r29));
        GC_push_one(info . THREAD_FLD (r30));
        GC_push_one(info . THREAD_FLD (r31));
#     else
        /* FIXME: Remove after testing: */
        WARN("This is completely untested and likely will not work\n", 0);
        GC_THREAD_STATE_T info;
        mach_msg_type_number_t outCount = THREAD_STATE_MAX;
        r = thread_get_state(thread, GC_MACH_THREAD_STATE, (natural_t *)&info,
                             &outCount);
        if(r != KERN_SUCCESS) ABORT("thread_get_state failed");

        lo = (void*)info . THREAD_FLD (esp);
        hi = (ptr_t)FindTopOfStack(info . THREAD_FLD (esp));

        GC_push_one(info . THREAD_FLD (eax));
        GC_push_one(info . THREAD_FLD (ebx));
        GC_push_one(info . THREAD_FLD (ecx));
        GC_push_one(info . THREAD_FLD (edx));
        GC_push_one(info . THREAD_FLD (edi));
        GC_push_one(info . THREAD_FLD (esi));
        /* GC_push_one(info . THREAD_FLD (ebp));  */
        /* GC_push_one(info . THREAD_FLD (esp));  */
        GC_push_one(info . THREAD_FLD (ss));
        GC_push_one(info . THREAD_FLD (eip));
        GC_push_one(info . THREAD_FLD (cs));
        GC_push_one(info . THREAD_FLD (ds));
        GC_push_one(info . THREAD_FLD (es));
        GC_push_one(info . THREAD_FLD (fs));
        GC_push_one(info . THREAD_FLD (gs));
#     endif /* !POWERPC */
      }
#     if DEBUG_THREADS
       GC_printf3("Darwin: Stack for thread 0x%lx = [%lx,%lx)\n",
                  (unsigned long) thread,
                  (unsigned long) lo,
                  (unsigned long) hi
                 );
#     endif
      GC_push_all_stack(lo, hi);
    } /* for(i = 0; i < listcount...) */
    vm_deallocate(current_task(), (vm_address_t)act_list,
                  sizeof(thread_t) * listcount);
}
#endif /* !DARWIN_DONT_PARSE_STACK */

static mach_port_t GC_mach_handler_thread;
static int GC_use_mach_handler_thread = 0;

static struct GC_mach_thread GC_mach_threads[THREAD_TABLE_SZ];
static int GC_mach_threads_count;

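/* Clear out the GC_mach_threads table.  Called at the start of each */
/* GC_stop_world().                                                   */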
void GC_stop_init() {
  int i;

  for (i = 0; i < THREAD_TABLE_SZ; i++) {
    GC_mach_threads[i].thread = 0;
    GC_mach_threads[i].already_suspended = 0;
  }
  GC_mach_threads_count = 0;
}

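/* Suspends every thread in act_list that is not this thread, not the    */
/* Mach exception handler thread, and not already suspended, recording   */
/* each newly seen thread in GC_mach_threads.                             */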
/* returns true if there's a thread in act_list that wasn't in old_list */
int GC_suspend_thread_list(thread_act_array_t act_list, int count,
                           thread_act_array_t old_list, int old_count) {
  mach_port_t my_thread = mach_thread_self();
  int i, j;

  int changed = 0;

  for(i = 0; i < count; i++) {
    thread_act_t thread = act_list[i];
#   if DEBUG_THREADS
      GC_printf1("Attempting to suspend thread %p\n", thread);
#   endif
    /* find the current thread in the old list */
    int found = 0;
    for(j = 0; j < old_count; j++) {
      thread_act_t old_thread = old_list[j];
      if (old_thread == thread) {
        found = 1;
        break;
      }
    }
    if (!found) {
      /* add it to the GC_mach_threads list */
      GC_mach_threads[GC_mach_threads_count].thread = thread;
      /* default is not suspended */
      GC_mach_threads[GC_mach_threads_count].already_suspended = 0;
      changed = 1;
    }

    if (thread != my_thread &&
        (!GC_use_mach_handler_thread
         || (GC_use_mach_handler_thread
             && GC_mach_handler_thread != thread))) {
      struct thread_basic_info info;
      mach_msg_type_number_t outCount = THREAD_INFO_MAX;
      kern_return_t kern_result = thread_info(thread, THREAD_BASIC_INFO,
                                (thread_info_t)&info, &outCount);
      if(kern_result != KERN_SUCCESS) {
        /* The thread may have quit since the task_threads() call; mark it */
        /* as already suspended so it is not dealt with later.             */
        if (!found) {
          GC_mach_threads[GC_mach_threads_count].already_suspended = TRUE;
          GC_mach_threads_count++;
        }
        continue;
      }
#     if DEBUG_THREADS
        GC_printf2("Thread state for 0x%lx = %d\n", thread, info.run_state);
#     endif
      if (!found) {
        GC_mach_threads[GC_mach_threads_count].already_suspended = info.suspend_count;
      }
      if (info.suspend_count) continue;

#     if DEBUG_THREADS
        GC_printf1("Suspending 0x%lx\n", thread);
#     endif
      /* Suspend the thread */
      kern_result = thread_suspend(thread);
      if(kern_result != KERN_SUCCESS) {
        /* The thread may have quit since the task_threads() call; mark it */
        /* as already suspended so it is not dealt with later.             */
        if (!found) {
          GC_mach_threads[GC_mach_threads_count].already_suspended = TRUE;
          GC_mach_threads_count++;
        }
        continue;
      }
    }
    if (!found) GC_mach_threads_count++;
  }
  return changed;
}


/* Caller holds allocation lock.	*/
void GC_stop_world()
{
    int i, changes;
    GC_thread p;
    mach_port_t my_thread = mach_thread_self();
    kern_return_t kern_result;
    thread_act_array_t act_list, prev_list;
    mach_msg_type_number_t listcount, prevcount;

#   if DEBUG_THREADS
      GC_printf1("Stopping the world from 0x%lx\n", mach_thread_self());
#   endif

    /* clear out the mach threads list table */
    GC_stop_init();

    /* Make sure all free list construction has stopped before we start. */
    /* No new construction can start, since free list construction is	*/
    /* required to acquire and release the GC lock before it starts,	*/
    /* and we have the lock.						*/
#   ifdef PARALLEL_MARK
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero. */
#   endif /* PARALLEL_MARK */

      /* Loop stopping threads until you have gone over the whole list
         twice without a new one appearing.  thread_create() won't return
         (and thus the creating thread cannot be stopped) until the new
         thread exists, so there is no window whereby you could stop a
         thread, recognise it is stopped, but then have a new thread it
         created before stopping show up later.
      */

      changes = 1;
      prev_list = NULL;
      prevcount = 0;
      do {
        int result;
        kern_result = task_threads(current_task(), &act_list, &listcount);
        result = GC_suspend_thread_list(act_list, listcount,
                                        prev_list, prevcount);
        changes = result;
        /* Free the previous list now that it is no longer needed;	*/
        /* act_list must remain valid, since it becomes the old_list	*/
        /* of the next pass.						*/
        if (prev_list != NULL)
          vm_deallocate(current_task(), (vm_address_t)prev_list,
                        sizeof(thread_t) * prevcount);
        prev_list = act_list;
        prevcount = listcount;
      } while (changes);
      /* Free the final thread list. */
      vm_deallocate(current_task(), (vm_address_t)act_list,
                    sizeof(thread_t) * listcount);

#   ifdef MPROTECT_VDB
      if(GC_incremental) {
        extern void GC_mprotect_stop();
        GC_mprotect_stop();
      }
#   endif

#   ifdef PARALLEL_MARK
      GC_release_mark_lock();
#   endif
#   if DEBUG_THREADS
      GC_printf1("World stopped from 0x%lx\n", my_thread);
#   endif
}

/* Caller holds allocation lock, and has held it continuously since	*/
/* the world stopped.							*/
void GC_start_world()
{
  mach_port_t my_thread = mach_thread_self();
  int i, j;
  GC_thread p;
  kern_return_t kern_result;
  thread_act_array_t act_list;
  mach_msg_type_number_t listcount;
  struct thread_basic_info info;
  mach_msg_type_number_t outCount = THREAD_INFO_MAX;

#   if DEBUG_THREADS
      GC_printf0("World starting\n");
#   endif

#   ifdef MPROTECT_VDB
      if(GC_incremental) {
        extern void GC_mprotect_resume();
        GC_mprotect_resume();
      }
#   endif

    kern_result = task_threads(current_task(), &act_list, &listcount);
    for(i = 0; i < listcount; i++) {
      thread_act_t thread = act_list[i];
      if (thread != my_thread &&
          (!GC_use_mach_handler_thread ||
           (GC_use_mach_handler_thread && GC_mach_handler_thread != thread))) {
        for(j = 0; j < GC_mach_threads_count; j++) {
          if (thread == GC_mach_threads[j].thread) {
            if (GC_mach_threads[j].already_suspended) {
#             if DEBUG_THREADS
                GC_printf1("Not resuming already suspended thread %p\n", thread);
#             endif
              continue;
            }
            kern_result = thread_info(thread, THREAD_BASIC_INFO,
                                      (thread_info_t)&info, &outCount);
            if(kern_result != KERN_SUCCESS) ABORT("thread_info failed");
#           if DEBUG_THREADS
              GC_printf2("Thread state for 0x%lx = %d\n", thread,
                         info.run_state);
              GC_printf1("Resuming 0x%lx\n", thread);
#           endif
            /* Resume the thread */
            kern_result = thread_resume(thread);
            if(kern_result != KERN_SUCCESS) ABORT("thread_resume failed");
          }
        }
      }
    }
    vm_deallocate(current_task(), (vm_address_t)act_list,
                  sizeof(thread_t) * listcount);
#   if DEBUG_THREADS
     GC_printf0("World started\n");
#   endif
}

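/* Record the thread used by the Mach exception handler so that	*/
/* GC_stop_world() and GC_start_world() never suspend or resume it.	*/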
void GC_darwin_register_mach_handler_thread(mach_port_t thread) {
  GC_mach_handler_thread = thread;
  GC_use_mach_handler_thread = 1;
}

#endif /* GC_DARWIN_THREADS */