1 /*
2 Provides:
3 Mach-based allocator (uses alloc_cache.c)
4 macosx_init_exception_handler() --- installs fault handler
5 size_type -- the type of the heap size
6 determine_max_heap_size()
7 Requires:
8 TEST = 0
9 GENERATIONS --- zero or non-zero
10 designate_modified --- when GENERATIONS is non-zero
11 Optional:
12 DONT_NEED_MAX_HEAP_SIZE --- to disable a provide
13 */
14
15 #include <sys/time.h>
16 #include <sys/resource.h>
17 #include <unistd.h>
18 #include <mach/mach.h>
19 #include <mach/mach_error.h>
20 #if defined(__POWERPC__) && 0
21 # define PPC_HAND_ROLLED_THREAD
22 #endif
23 #ifdef PPC_HAND_ROLLED_THREAD
24 # include <architecture/ppc/cframe.h>
25 #else
26 # include <pthread.h>
27 #endif
28
29 # if GENERATIONS
30 static int designate_modified(void *p);
31 # endif
32 # define TEST 0
33 #ifndef TEST
34 # define TEST 1
35 int designate_modified(void *p);
36 #endif
37
38 #if __DARWIN_UNIX03
39 # define THREAD_FLD(x) __ ## x
40 #else
41 # define THREAD_FLD(x) x
42 #endif
43
44 #if defined(MZ_USE_PLACES)
/* Registration record mapping a Mach thread port to that thread's
   Thread_Local_Variables (presumably the per-place state when
   MZ_USE_PLACES is on — the exception handler uses it to act on
   behalf of the faulting thread). */
typedef struct OSXThreadData {
  struct OSXThreadData *next;   /* next record in the same hash bucket */
  mach_port_t thread_port_id;   /* lookup key: the thread's Mach port name */
  Thread_Local_Variables *tlvs; /* the registered thread-local block */
} OSXThreadData;

/* static const int OSX_THREAD_TABLE_SIZE = 256; */
#define OSX_THREAD_TABLE_SIZE 256
/* Chained hash table of registered threads, keyed by
   thread_port_id % OSX_THREAD_TABLE_SIZE; all access is guarded by
   osxthreadsmutex. */
static OSXThreadData *osxthreads[OSX_THREAD_TABLE_SIZE];
static pthread_mutex_t osxthreadsmutex = PTHREAD_MUTEX_INITIALIZER;
55
/* Look up the Thread_Local_Variables registered for a given Mach
   thread port, or NULL if the thread was never registered.
   Walks the thread's hash bucket under osxthreadsmutex. */
static Thread_Local_Variables *get_mach_thread_tlvs(mach_port_t threadid)
{
  int bucket = threadid % OSX_THREAD_TABLE_SIZE;
  OSXThreadData *entry;
  Thread_Local_Variables *found = NULL;

  pthread_mutex_lock(&osxthreadsmutex);
  entry = osxthreads[bucket];
  while (entry != NULL) {
    if (entry->thread_port_id == threadid) {
      found = entry->tlvs;
      break;
    }
    entry = entry->next;
  }
  pthread_mutex_unlock(&osxthreadsmutex);

  return found;
}
75
/* Make the calling (handler) thread adopt the thread-locals that were
   registered for the Mach thread `threadid`, so subsequent GC callbacks
   resolve against the faulting thread's state. */
static void set_thread_locals_from_mach_thread_id(mach_port_t threadid) {
  Thread_Local_Variables *found = get_mach_thread_tlvs(threadid);
#ifdef USE_THREAD_LOCAL
  pthread_setspecific(scheme_thread_local_key, found);
#endif
}
82
/* Record the current Mach thread in the osxthreads table so the
   exception-handler thread can later map its port back to its
   Thread_Local_Variables. */
static void register_mach_thread(void) {
  mach_port_t thread_self = mach_thread_self();
  int index = thread_self % OSX_THREAD_TABLE_SIZE;
  OSXThreadData *thread = malloc(sizeof(OSXThreadData));

  /* Fix: the allocation was previously unchecked; on OOM the NULL
     pointer was dereferenced immediately below. */
  if (!thread) {
    GCPRINT(GCOUTF, "Couldn't allocate thread registration record\n");
    abort();
  }

  thread->thread_port_id = thread_self;
  thread->tlvs = scheme_get_thread_local_variables();

  /* PUSH thread record onto osxthreads datastructure */
  pthread_mutex_lock(&osxthreadsmutex);
  {
    thread->next = osxthreads[index];
    osxthreads[index] = thread;
  }
  pthread_mutex_unlock(&osxthreadsmutex);
}
99
/* Remove the current Mach thread's record from the osxthreads table
   (no-op if the thread was never registered). */
static void unregister_mach_thread(void) {
  mach_port_t thread_self = mach_thread_self();
  int index = thread_self % OSX_THREAD_TABLE_SIZE;
  OSXThreadData *thread, *prev = NULL;

  pthread_mutex_lock(&osxthreadsmutex);
  thread = osxthreads[index];
  /* Fix: the old loop dereferenced `thread` without a NULL check, so an
     empty bucket or an unregistered thread crashed here even though the
     `if (thread)` below was clearly meant to handle "not found". */
  while (thread && (thread->thread_port_id != thread_self)) {
    prev = thread;
    thread = thread->next;
  }
  if (thread) {
    if (prev)
      prev->next = thread->next;
    else
      osxthreads[index] = thread->next;
    free(thread);
  }
  pthread_mutex_unlock(&osxthreadsmutex);
}
120
121 #endif
122
123 #if defined(__POWERPC__)
124 # define ARCH_thread_state_t ppc_thread_state_t
125 # define ARCH_THREAD_STATE PPC_THREAD_STATE
126 # define ARCH_THREAD_STATE_COUNT PPC_THREAD_STATE_COUNT
127 #elif defined(__arm__)
128 # define ARCH_thread_state_t arm_thread_state_t
129 # define ARCH_THREAD_STATE ARM_THREAD_STATE
130 # define ARCH_THREAD_STATE_COUNT ARM_THREAD_STATE_COUNT
131 #elif defined(__x86_64__)
132 # define ARCH_exception_state_t x86_exception_state64_t
133 # define ARCH_EXCEPTION_STATE x86_EXCEPTION_STATE64
134 # define ARCH_EXCEPTION_STATE_COUNT x86_EXCEPTION_STATE64_COUNT
135 # define ARCH_thread_state_t x86_thread_state64_t
136 # define ARCH_THREAD_STATE x86_THREAD_STATE64
137 # define ARCH_THREAD_STATE_COUNT x86_THREAD_STATE64_COUNT
138 # define USE_THREAD_STATE
139 # include <mach/thread_status.h>
140 # include <mach/exception.h>
141 #elif defined(__arm64__)
142 # define ARCH_exception_state_t arm_exception_state64_t
143 # define ARCH_EXCEPTION_STATE ARM_EXCEPTION_STATE64
144 # define ARCH_EXCEPTION_STATE_COUNT ARM_EXCEPTION_STATE64_COUNT
145 # define ARCH_thread_state_t arm_thread_state64_t
146 # define ARCH_THREAD_STATE ARM_THREAD_STATE64
147 # define ARCH_THREAD_STATE_COUNT ARM_THREAD_STATE64_COUNT
148 # define ARCH_FAULTVADDR_FIELD THREAD_FLD(far)
149 # define USE_THREAD_STATE
150 # include <mach/thread_status.h>
151 # include <mach/exception.h>
152 #else
153 # define ARCH_thread_state_t i386_thread_state_t
154 # define ARCH_THREAD_STATE i386_THREAD_STATE
155 # define ARCH_THREAD_STATE_COUNT i386_THREAD_STATE_COUNT
156 #endif
157
158 #ifndef ARCH_FAULTVADDR_FIELD
159 # define ARCH_FAULTVADDR_FIELD THREAD_FLD(faultvaddr)
160 #endif
161
162 /* the structure of an exception msg and its reply */
/* Reply sent back to the kernel after handling an exception message. */
typedef struct rep_msg {
  mach_msg_header_t head;
  NDR_record_t NDR;
  kern_return_t ret_code; /* result of the catch_exception_raise* call */
} mach_reply_msg_t;

/* Incoming EXC_* message as delivered to our exception port. Layout
   must match what the kernel sends for EXCEPTION_DEFAULT behavior. */
typedef struct exc_msg {
  mach_msg_header_t head;
  /* start of the kernel processed data */
  mach_msg_body_t msgh_body;
  mach_msg_port_descriptor_t thread; /* port of the faulting thread */
  mach_msg_port_descriptor_t task;   /* port of the faulting task */
  /* end of the kernel processed data */
  NDR_record_t NDR;
  exception_type_t exception;        /* e.g. EXC_BAD_ACCESS */
  mach_msg_type_number_t code_cnt;
  exception_data_t code;
  /* some padding */
  char pad[512]; /* slack so mach_msg() never overruns the buffer */
} mach_exc_msg_t;
183
184 /* this is a neat little mach callback */
185 extern boolean_t exc_server(mach_msg_header_t *in, mach_msg_header_t *out);
186
187 /* these are the globals everyone needs */
188 #define page_size vm_page_size
189 static mach_port_t task_self = 0;
190 static mach_port_t exc_port = 0;
191
192 /* the VM subsystem as defined by the GC files */
/* Allocate `len` bytes (rounded up to a whole number of VM pages) of
   fresh, page-aligned memory from the Mach VM subsystem. Returns NULL
   when the address space is exhausted; aborts on any other Mach error. */
static void *os_alloc_pages(size_t len)
{
  kern_return_t status;
  void *mem;
  size_t tail;

  if (!task_self)
    task_self = mach_task_self();

  /* round up to the nearest page: */
  tail = len & (page_size - 1);
  if (tail)
    len += page_size - tail;

  status = vm_allocate(task_self, (vm_address_t *)&mem, len, TRUE);
  if (status == KERN_SUCCESS)
    return mem;
  if (status == KERN_NO_SPACE)
    return NULL;

  GCPRINT(GCOUTF, "Couldn't allocate memory: %s\n", mach_error_string(status));
  abort();
}
214
/* Return pages previously obtained via os_alloc_pages() to the OS.
   Deallocation failure is reported but is not fatal. */
static void os_free_pages(void *p, size_t len)
{
  kern_return_t status = vm_deallocate(task_self, (vm_address_t)p, len);

  if (status != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "WARNING: couldn't deallocate page %p: %s\n", p,
            mach_error_string(status));
  }
}
225
/* Set the protection of the `len` bytes at `p` (rounded up to a whole
   number of pages) to read-only or read+write. Aborts on failure,
   since the GC cannot operate with wrong page protections. */
static void os_protect_pages(void *p, size_t len, int writeable)
{
  kern_return_t retval;

  /* round up to the nearest page: */
  if(len & (page_size - 1)) {
    len += page_size - (len & (page_size - 1));
  }

  retval = vm_protect(task_self, (vm_address_t)p, len, FALSE,
                      (VM_PROT_READ | (writeable ? VM_PROT_WRITE : 0)));
  if(retval != KERN_SUCCESS) {
    /* Fix: the old format used %li for a size_t (undefined behavior)
       and "%p%s" fused the pointer and the error string together. */
    GCPRINT(GCOUTF, "WARNING: couldn't protect %lu bytes of page %p: %s\n",
            (unsigned long)len, p, mach_error_string(retval));
    abort();
  }
}
242
243 #ifndef DONT_NEED_MAX_HEAP_SIZE
244
/* Report the soft RSS resource limit as the maximum heap size.
   Returns (unsigned long)-1 ("effectively unlimited") when no limit is
   set or when the limit cannot be read. */
static unsigned long determine_max_heap_size(void)
{
  struct rlimit rlim;

  /* Fix: getrlimit()'s result was previously ignored, so on failure the
     limit was read from an uninitialized struct. Treat failure as
     "no limit". */
  if (getrlimit(RLIMIT_RSS, &rlim) != 0)
    return (unsigned long)-1;

  return (rlim.rlim_cur == RLIM_INFINITY) ? (unsigned long)-1 : rlim.rlim_cur;
}
252 #endif
253
254 /* The catch_exception_raise() functions are treated specially by the
255 linker, and Mach looks them up at run time. We provide
256 GC_... variants due to linker confusion when the implementation of
257 these are in a framework instead of the main binary, so that the
258 main binary needs to define them and jump to the implemenations
259 here. (This linker problem seems to occur when we use
260 -mmacosx-version-min.) */
261
/* Stub for the "state" flavor of Mach exception delivery, which this
   handler does not use (it installs EXCEPTION_DEFAULT behavior below).
   Returning KERN_FAILURE tells exc_server() to decline the message. */
kern_return_t GC_catch_exception_raise_state(mach_port_t port,
                                             exception_type_t exception_type,
                                             exception_data_t exception_data,
                                             mach_msg_type_number_t data_cnt,
                                             thread_state_flavor_t *flavor,
                                             thread_state_t in_state,
                                             mach_msg_type_number_t is_cnt,
                                             thread_state_t out_state,
                                             mach_msg_type_number_t os_cnt)
{
  /* none of the arguments are inspected */
  return KERN_FAILURE;
}
274
/* Entry point that Mach's exc_server() resolves by name; it simply
   forwards to the GC_-prefixed implementation (see the linker note
   above about frameworks and -mmacosx-version-min). */
kern_return_t catch_exception_raise_state(mach_port_t port,
    exception_type_t exception_type, exception_data_t exception_data,
    mach_msg_type_number_t data_cnt, thread_state_flavor_t *flavor,
    thread_state_t in_state, mach_msg_type_number_t is_cnt,
    thread_state_t out_state, mach_msg_type_number_t os_cnt)
{
  return GC_catch_exception_raise_state(port, exception_type, exception_data,
                                        data_cnt, flavor, in_state, is_cnt,
                                        out_state, os_cnt);
}
290
/* Stub for the "state identity" flavor of Mach exception delivery,
   which this handler does not use; declining with KERN_FAILURE lets
   exc_server() fall back appropriately.
   NOTE(review): "identitity" is a long-standing typo, preserved because
   the forwarding wrapper below uses this exact name. */
kern_return_t GC_catch_exception_raise_state_identitity
(mach_port_t port, mach_port_t thread_port, mach_port_t task_port,
 exception_type_t exception_type, exception_data_t exception_data,
 mach_msg_type_number_t data_count, thread_state_flavor_t *state_flavor,
 thread_state_t in_state, mach_msg_type_number_t in_state_count,
 thread_state_t out_state, mach_msg_type_number_t out_state_count)
{
  return KERN_FAILURE;
}
300
/* Entry point resolved by name at exception-dispatch time; forwards to
   the GC_-prefixed implementation (same typo'd name, kept in sync). */
kern_return_t catch_exception_raise_state_identitity
(mach_port_t port, mach_port_t thread_port, mach_port_t task_port,
 exception_type_t exception_type, exception_data_t exception_data,
 mach_msg_type_number_t data_count, thread_state_flavor_t *state_flavor,
 thread_state_t in_state, mach_msg_type_number_t in_state_count,
 thread_state_t out_state, mach_msg_type_number_t out_state_count)
{
  return GC_catch_exception_raise_state_identitity(port,
                                                   thread_port,
                                                   task_port,
                                                   exception_type,
                                                   exception_data,
                                                   data_count,
                                                   state_flavor,
                                                   in_state,
                                                   in_state_count,
                                                   out_state,
                                                   out_state_count);
}
314
/* The real fault handler (EXCEPTION_DEFAULT behavior): called from
   exc_server() on the dedicated exception thread when a registered
   thread takes an EXC_BAD_ACCESS. If the fault is a write to a
   GC-protected page, hand the address to designate_modified() and
   report success so the faulting thread resumes; otherwise fail so the
   kernel escalates (e.g. to a crash report). */
kern_return_t GC_catch_exception_raise(mach_port_t port,
                                       mach_port_t thread_port,
                                       mach_port_t task_port,
                                       exception_type_t exception_type,
                                       exception_data_t exception_data,
                                       mach_msg_type_number_t data_count)
{
#if GENERATIONS
  /* kernel return value is in exception_data[0], faulting address in
     exception_data[1] */
  if(exception_data[0] == KERN_PROTECTION_FAILURE) {
    void *p;
#ifndef USE_THREAD_STATE
    p = (void*)exception_data[1];
#else
    /* We have to do it this way for 64-bit mode: exception_data[1] is
       too narrow, so read the fault address from the faulting thread's
       exception state instead. */
    ARCH_exception_state_t exc_state;
    mach_msg_type_number_t exc_state_count = ARCH_EXCEPTION_STATE_COUNT;
    (void)thread_get_state(thread_port, ARCH_EXCEPTION_STATE, (natural_t*)&exc_state,
                           &exc_state_count);
    p = (void *)exc_state.ARCH_FAULTVADDR_FIELD;
#endif

#if defined(MZ_USE_PLACES)
    /* Adopt the faulting thread's registered thread-locals before
       calling into the GC. */
    set_thread_locals_from_mach_thread_id(thread_port);
#endif

    if (designate_modified(p))
      return KERN_SUCCESS;
    else
      return KERN_FAILURE;
  } else
#endif
    return KERN_FAILURE;
}
350
/* Mach-visible entry point for EXCEPTION_DEFAULT delivery; forwards to
   the GC_-prefixed implementation (see the linker note above). */
kern_return_t catch_exception_raise(mach_port_t port, mach_port_t thread_port,
                                    mach_port_t task_port,
                                    exception_type_t exception_type,
                                    exception_data_t exception_data,
                                    mach_msg_type_number_t data_count)
{
  return GC_catch_exception_raise(port, thread_port, task_port, exception_type,
                                  exception_data, data_count);
}
361
362 /* this is the thread which forwards of exceptions read from the exception
363 server off to our exception catchers and then back out to the other
364 thread */
/* Body of the dedicated exception-handling thread: blocks on exc_port,
   hands each received message to exc_server() (which invokes the
   catch_exception_raise* callbacks above), and sends the generated
   reply back to the kernel. Runs forever; any Mach-level failure or
   malformed message aborts the process. */
void exception_thread(void *shared_thread_state)
{
  mach_msg_header_t *message;
  mach_msg_header_t *reply;
  kern_return_t retval;

#ifdef USE_THREAD_LOCAL
  pthread_setspecific(scheme_thread_local_key, shared_thread_state);
#endif

  /* allocate the space for the message and reply */
  message = (mach_msg_header_t*)malloc(sizeof(mach_exc_msg_t));
  reply = (mach_msg_header_t*)malloc(sizeof(mach_reply_msg_t));
  /* Fix: both allocations were previously unchecked, so an OOM here
     crashed inside mach_msg() instead of failing cleanly. */
  if (!message || !reply) {
    GCPRINT(GCOUTF, "Couldn't allocate exception message buffers\n");
    abort();
  }
  /* do this loop forever */
  while(1) {
    /* block until we get an exception message */
    retval = mach_msg(message, MACH_RCV_MSG, 0, sizeof(mach_exc_msg_t),
                      exc_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if(retval != KERN_SUCCESS) {
      GCPRINT(GCOUTF, "Message receive failed: %s\n", mach_error_string(retval));
      abort();
    }
    /* forward off the handling of this message */
    if(!exc_server(message, reply)) {
      GCPRINT(GCOUTF, "INTERNAL ERROR: exc_server() didn't like something\n");
      abort();
    }
    /* send the message back out to the thread */
    retval = mach_msg(reply, MACH_SEND_MSG, sizeof(mach_reply_msg_t), 0,
                      MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if(retval != KERN_SUCCESS) {
      GCPRINT(GCOUTF, "Message send failed: %s\n", mach_error_string(retval));
      abort();
    }
  }
}
401
/* Route the calling thread's EXC_BAD_ACCESS exceptions to exc_port so
   they are serviced by the handler thread. Requires that
   macosx_init_exception_handler() already ran in the master GC (which
   sets task_self and allocates exc_port); otherwise this is a no-op. */
void GC_attach_current_thread_exceptions_to_handler()
{
  mach_port_t thread_self, exc_port_s;
  mach_msg_type_name_t type;
  kern_return_t retval;

  if (!task_self) return;

  /* get ids for ourself */
  thread_self = mach_thread_self();

  /* extract out the send rights for that port, which the OS needs */
  retval = mach_port_extract_right(task_self, exc_port, MACH_MSG_TYPE_MAKE_SEND,
                                   &exc_port_s, &type);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't extract send rights: %s\n", mach_error_string(retval));
    abort();
  }

  /* set the exception ports for this thread to the above */
  retval = thread_set_exception_ports(thread_self, EXC_MASK_BAD_ACCESS,
                                      exc_port_s, EXCEPTION_DEFAULT,
                                      ARCH_THREAD_STATE);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't set exception ports: %s\n", mach_error_string(retval));
    abort();
  }
#if defined(MZ_USE_PLACES)
  /* make the thread discoverable by the handler (see osxthreads table) */
  register_mach_thread();
#endif
  /* NOTE(review): mach_thread_self() returns a port reference that is
     never deallocated here — confirm whether the small per-attach port
     leak matters for long-lived processes. */
}
433
/* Undo GC_attach_current_thread_exceptions_to_handler() for the calling
   thread. Only the places bookkeeping is reverted; the thread's Mach
   exception ports are left pointing at exc_port. */
void GC_detach_current_thread_exceptions_from_handler()
{
#if defined(MZ_USE_PLACES)
  unregister_mach_thread();
#endif
}
440
441 /* this initializes the subsystem (sets the exception port, starts the
442 exception handling thread, etc) */
/* this initializes the subsystem (sets the exception port, starts the
   exception handling thread, etc).  For the master GC this allocates
   the shared exc_port and spawns the handler thread; for every other
   GC/place it only attaches the current thread to the existing
   handler. */
static void macosx_init_exception_handler(int isMASTERGC)
{
  kern_return_t retval;

  /* Note: the `designate_modified` function relies on the fact that
     all exceptions (at least within a place) go through the same
     handler thread, so it can skip the lock on modified pages. */

  if (!isMASTERGC) {
    GC_attach_current_thread_exceptions_to_handler();
    return;
  }

  if(!task_self) task_self = mach_task_self();

  /* allocate the port we're going to get exceptions on */
  retval = mach_port_allocate(task_self, MACH_PORT_RIGHT_RECEIVE, &exc_port);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't allocate exception port: %s\n",
            mach_error_string(retval));
    abort();
  }

  GC_attach_current_thread_exceptions_to_handler();

#ifdef PPC_HAND_ROLLED_THREAD
  /* Old hand-rolled thread creation. */
  {
    /* set up the subthread */
    mach_port_t exc_thread;
    ARCH_thread_state_t *exc_thread_state;
    void *subthread_stack;

    retval = thread_create(task_self, &exc_thread);
    if(retval != KERN_SUCCESS) {
      GCPRINT(GCOUTF, "Couldn't create exception thread: %s\n", mach_error_string(retval));
      abort();
    }
    /* carve the thread's initial stack pointer out of a fresh page;
       the PPC ABI red zone and argument-save area are left free below it */
    subthread_stack = (void*)malloc(page_size);
    subthread_stack += (page_size - C_ARGSAVE_LEN - C_RED_ZONE);
    exc_thread_state = (ARCH_thread_state_t*)malloc(sizeof(ARCH_thread_state_t));
    /* srr0 = initial PC, r1 = initial stack pointer (PPC register names) */
    exc_thread_state->srr0 = (unsigned int)exception_thread;
    exc_thread_state->r1 = (unsigned int)subthread_stack;
    retval = thread_set_state(exc_thread, ARCH_THREAD_STATE,
                              (thread_state_t)exc_thread_state,
                              ARCH_THREAD_STATE_COUNT);
    if(retval != KERN_SUCCESS) {
      GCPRINT(GCOUTF, "Couldn't set subthread state: %s\n", mach_error_string(retval));
      abort();
    }
    retval = thread_resume(exc_thread);
    if(retval != KERN_SUCCESS) {
      GCPRINT(GCOUTF, "Couldn't resume subthread: %s\n", mach_error_string(retval));
      abort();
    }
  }
#else
  {
    pthread_t th;
    void *data = NULL;
#ifdef USE_THREAD_LOCAL
    /* pass the creator's thread-locals so exception_thread() can
       install them as its own */
    data = pthread_getspecific(scheme_thread_local_key);
#endif
    /* NOTE(review): pthread_create's return value is not checked; a
       failure leaves the process without a fault handler — confirm
       whether that is acceptable here. */
    pthread_create(&th, NULL, (void *(*)(void *))exception_thread, data);
  }
#endif
}
510
511 #if TEST
512 #define MPAGE_SIZE 16384
513 #define BPAGE_SIZE 20034
514
515 char *normal_page = NULL;
516 char *big_page = NULL;
517
designate_modified(void * p)518 int designate_modified(void *p)
519 {
520 if((p >= normal_page) && (p < (normal_page + MPAGE_SIZE))) {
521 vm_protect_pages(p, MPAGE_SIZE, 1);
522 return 1;
523 }
524 if((p >= big_page) && (p < (big_page + BPAGE_SIZE))) {
525 vm_protect_pages(p, BPAGE_SIZE, 1);
526 return 1;
527 }
528 printf("Unrecognized write: %p\n", p);
529 return 0;
530 }
531
main(int argc,char ** argv)532 int main(int argc, char **argv)
533 {
534 macosx_init_exception_handler();
535 printf("Allocating test pages:\n");
536 normal_page = vm_malloc_pages(MPAGE_SIZE, MPAGE_SIZE,0);
537 printf(" ... normal page at %p\n", normal_page);
538 big_page = vm_malloc_pages(BPAGE_SIZE, MPAGE_SIZE,0);
539 printf(" ... big page at %p\n", big_page);
540 printf("Setting protection on test pages\n");
541 vm_protect_pages(normal_page, MPAGE_SIZE, 0);
542 printf(" ... normal page %p set\n", normal_page);
543 vm_protect_pages(big_page, MPAGE_SIZE, 0);
544 printf(" ... big page %p set\n", big_page);
545 printf("Writing to test pages\n");
546 normal_page[2] = 'A';
547 big_page[2] = 'A';
548 printf("Reading from test pages:\n");
549 printf(" ... normal_page %p's second byte is %c\n", normal_page, normal_page[2]);
550 printf(" ... big_page %p's second byte is %c\n", big_page, big_page[2]);
551 printf("Freeing test pages:\n");
552 vm_free_pages(normal_page, MPAGE_SIZE);
553 printf(" ... freed normal page\n");
554 vm_free_pages(big_page, MPAGE_SIZE);
555 printf(" ... freed big page\n");
556 }
557 #endif
558