1 /*
2 * %CopyrightBegin%
3 *
4 * Copyright Ericsson AB 2010-2020. All Rights Reserved.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * %CopyrightEnd%
19 */
20
21 /*
22 * Description: Pthread implementation of the ethread library
23 * Author: Rickard Green
24 */
25
26 #ifdef HAVE_CONFIG_H
27 #include "config.h"
28 #endif
29
30 #define ETHR_CHILD_WAIT_SPIN_COUNT 4000
31
#include <stdio.h>
#ifdef ETHR_TIME_WITH_SYS_TIME
#	include <time.h>
#	include <sys/time.h>
#else
#	ifdef ETHR_HAVE_SYS_TIME_H
#		include <sys/time.h>
#	else
#		include <time.h>
#	endif
#endif
#include <sys/types.h>
#include <unistd.h>
#include <signal.h>
#include <string.h>
#include <errno.h>

#include <limits.h>
49
50 #if defined (__HAIKU__)
51 #include <os/kernel/OS.h>
52 #endif
53
54 #define ETHR_INLINE_FUNC_NAME_(X) X ## __
55 #define ETHREAD_IMPL__
56
57 #include "ethread.h"
58 #undef ETHR_INCLUDE_MONOTONIC_CLOCK__
59 #define ETHR_INCLUDE_MONOTONIC_CLOCK__
60 #include "ethr_internal.h"
61
62 #ifndef ETHR_HAVE_ETHREAD_DEFINES
63 #error Missing configure defines
64 #endif
65
66 pthread_key_t ethr_ts_event_key__;
67 static int child_wait_spin_count;
68
69 /*
70 * --------------------------------------------------------------------------
71 * Static functions
72 * --------------------------------------------------------------------------
73 */
74
/* Run all registered thread-exit handlers for the calling thread.
   Called both from thr_wrapper() (normal return) and ethr_thr_exit(). */
static void thr_exit_cleanup(void)
{
    ethr_run_exit_handlers__();
}
79
80
/* Argument passed to thr_wrapper(). Lives on the creating (parent)
   thread's stack; the child must not touch it after storing into
   'result' (see thr_wrapper()). */
typedef struct {
    ethr_atomic32_t result;	/* -1 = child busy; 0 = ok; >0 = error code */
    ethr_ts_event *tse;		/* parent's ts event; child signals it */
    void *(*thr_func)(void *);	/* user-supplied thread function */
    void *arg;			/* argument passed to thr_func */
    void *prep_func_res;	/* result of ethr_thr_prepare_func__() */
    size_t stacksize;		/* usable stack size given to the child */
    char *name;			/* thread name, or NULL */
    char name_buff[32];		/* storage 'name' points into when set */
} ethr_thr_wrap_data__;
92
/*
 * Entry point for every thread created via ethr_thr_create().
 *
 * 'vtwd' points at the parent's stack-allocated ethr_thr_wrap_data__.
 * The wrapper sets up the child's thread-local state, reports the
 * setup result back to the parent (which is waiting on twd->result in
 * ethr_thr_create()), and then runs the user's thread function.
 */
static void *thr_wrapper(void *vtwd)
{
    ethr_sint32_t result;
    char c; /* used only for its stack address in ethr_set_stacklimit__() */
    void *res;
    ethr_thr_wrap_data__ *twd = (ethr_thr_wrap_data__ *) vtwd;
    void *(*thr_func)(void *) = twd->thr_func;
    void *arg = twd->arg;
    ethr_ts_event *tsep = NULL;

    /* Record this thread's stack limit from a current stack address */
    ethr_set_stacklimit__(&c, twd->stacksize);

    /* Create this thread's thread-specific event structure */
    result = (ethr_sint32_t) ethr_make_ts_event__(&tsep, 0);

    if (result == 0) {
	tsep->iflgs |= ETHR_TS_EV_ETHREAD;
	if (ethr_thr_child_func__)
	    ethr_thr_child_func__(twd->prep_func_res);
    }

    /* From here on 'tsep' refers to the PARENT's ts event; it is used
       only to wake the parent waiting in ethr_thr_create(). Everything
       needed from twd is read before the 'result' store below. */
    tsep = twd->tse; /* We aren't allowed to follow twd after
			result has been set! */
    if (twd->name)
	ethr_setname(twd->name);

    /* Publish the setup result; after this store the parent may return
       and twd (on the parent's stack) must not be touched again. */
    ethr_atomic32_set(&twd->result, result);

    ethr_event_set(&tsep->event);

    /* Run the user's thread function only if setup succeeded */
    res = result == 0 ? (*thr_func)(arg) : NULL;

    thr_exit_cleanup();
    return res;
}
127
128 /* internal exports */
129
/* Store the calling thread's ts event pointer in thread-specific data.
   Returns 0 on success or an error code from pthread_setspecific(). */
int ethr_set_tse__(ethr_ts_event *tsep)
{
    return pthread_setspecific(ethr_ts_event_key__, (void *) tsep);
}
134
/* Fetch the calling thread's ts event pointer; NULL if none is set. */
ethr_ts_event *ethr_get_tse__(void)
{
    return pthread_getspecific(ethr_ts_event_key__);
}
139
140 #if defined(ETHR_PPC_RUNTIME_CONF__)
141
142 #include <sys/wait.h>
143
/* SIGILL handler installed in the forked test child of ppc_init__():
   executing lwsync on a CPU without it raises SIGILL, and the child
   exits with status 1 to tell the parent lwsync is unavailable. */
static void
handle_lwsync_sigill(int signum)
{
    _exit(1);
}
149
150 static int
ppc_init__(void)151 ppc_init__(void)
152 {
153 int pid;
154
155 /* If anything what so ever failes we assume no lwsync for safety */
156 ethr_runtime__.conf.have_lwsync = 0;
157
158 /*
159 * We perform the lwsync test (which might cause an illegal
160 * instruction signal) in a separate process in order to be
161 * completely certain that we do not mess up our own state.
162 */
163 pid = fork();
164 if (pid == 0) {
165 struct sigaction act, oact;
166
167 sigemptyset(&act.sa_mask);
168 act.sa_flags = SA_RESETHAND;
169 act.sa_handler = handle_lwsync_sigill;
170 if (sigaction(SIGILL, &act, &oact) != 0)
171 _exit(2);
172
173 __asm__ __volatile__ ("lwsync\n\t" : : : "memory");
174
175 _exit(0);
176 }
177
178 if (pid != -1) {
179 while (1) {
180 int status, res;
181 res = waitpid(pid, &status, 0);
182 if (res == pid) {
183 if (WIFEXITED(status) && WEXITSTATUS(status) == 0)
184 ethr_runtime__.conf.have_lwsync = 1;
185 break;
186 }
187 }
188 }
189 return 0;
190 }
191
192 #endif
193
194 #if defined(ETHR_X86_RUNTIME_CONF__)
195
/*
 * Execute the cpuid instruction with *eax as input (the leaf number)
 * and store the resulting eax/ebx/ecx/edx register values through the
 * pointer arguments. On 32-bit x86, where cpuid may be absent, its
 * presence is probed first; without cpuid all four outputs are zeroed.
 */
void
ethr_x86_cpuid__(int *eax, int *ebx, int *ecx, int *edx)
{
#if ETHR_SIZEOF_PTR == 4
    int have_cpuid;
    /*
     * If it is possible to toggle eflags bit 21,
     * we have the cpuid instruction.
     */
    __asm__ ("pushf\n\t"
             "popl %%eax\n\t"
             "movl %%eax, %%ecx\n\t"
             "xorl $0x200000, %%eax\n\t"
             "pushl %%eax\n\t"
             "popf\n\t"
             "pushf\n\t"
             "popl %%eax\n\t"
             "movl $0x0, %0\n\t"
             "xorl %%ecx, %%eax\n\t"
             "jz 1f\n\t"
             "movl $0x1, %0\n\t"
             "1:\n\t"
             : "=r"(have_cpuid)
             :
             : "%eax", "%ecx", "cc");
    if (!have_cpuid) {
	*eax = *ebx = *ecx = *edx = 0;
	return;
    }
#endif
#if ETHR_SIZEOF_PTR == 4 && defined(__PIC__) && __PIC__
    /*
     * When position independent code is used in 32-bit mode, the B register
     * is used for storage of global offset table address, and we may not
     * use it as input or output in an asm. We need to save and restore the
     * B register explicitly (for some reason gcc doesn't provide this
     * service to us).
     */
    __asm__ ("pushl %%ebx\n\t"
             "cpuid\n\t"
             "movl %%ebx, %1\n\t"
             "popl %%ebx\n\t"
             : "=a"(*eax), "=r"(*ebx), "=c"(*ecx), "=d"(*edx)
             : "0"(*eax)
             : "cc");
#else
    __asm__ ("cpuid\n\t"
             : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx)
             : "0"(*eax)
             : "cc");
#endif
}
248
249 #endif /* ETHR_X86_RUNTIME_CONF__ */
250
251 #ifdef ETHR_HAVE_ETHR_GET_MONOTONIC_TIME
252 static void init_get_monotonic_time(void);
253 #endif
254
255 /*
256 * --------------------------------------------------------------------------
257 * Exported functions
258 * --------------------------------------------------------------------------
259 */
260
/*
 * Initialize the ethread library. Must complete successfully before
 * any other ethr function is used.
 *
 * id - initialization data forwarded to ethr_init_common__()
 *
 * Returns 0 on success, EINVAL if already initialized, or an error
 * code from one of the initialization steps. On failure the library
 * is returned to the "not initialized" state so init may be retried.
 */
int
ethr_init(ethr_init_data *id)
{
    int res;

    if (!ethr_not_inited__)
	return EINVAL; /* already initialized */

    ethr_not_inited__ = 0;

#if defined(ETHR_PPC_RUNTIME_CONF__)
    /* Runtime detection of the PowerPC lwsync instruction */
    res = ppc_init__();
    if (res != 0)
	goto error;
#endif

#ifdef ETHR_HAVE_ETHR_GET_MONOTONIC_TIME
    init_get_monotonic_time();
#endif

    res = ethr_init_common__(id);
    if (res != 0)
	goto error;

    /* Spin while waiting for a child thread's setup only on
       multi-processor systems; on a single configured CPU spinning
       just wastes the quantum */
    child_wait_spin_count = ETHR_CHILD_WAIT_SPIN_COUNT;
    if (erts_get_cpu_configured(ethr_cpu_info__) == 1)
	child_wait_spin_count = 0;

    /* TSD key for the per-thread ts event; the destructor runs at
       thread exit */
    res = pthread_key_create(&ethr_ts_event_key__, ethr_ts_event_destructor__);
    if (res != 0)
	goto error;

    return 0;
 error:
    ethr_not_inited__ = 1; /* roll back so a retry is possible */
    return res;

}
299
300 int
ethr_late_init(ethr_late_init_data * id)301 ethr_late_init(ethr_late_init_data *id)
302 {
303 int res = ethr_late_init_common__(id);
304 if (res != 0)
305 return res;
306 ethr_not_completely_inited__ = 0;
307 return res;
308 }
309
/*
 * Create a new thread.
 *
 * tid  - out: identifier of the created thread
 * func - thread entry function
 * arg  - argument passed to 'func'
 * opts - optional creation options (suggested stack size, name,
 *        detached flag); may be NULL for defaults
 *
 * Returns 0 on success; otherwise an errno-style error code.
 *
 * The parent waits (spinning first, then blocking on its own ts
 * event) until the child reports its setup result via twd.result;
 * see thr_wrapper().
 */
int
ethr_thr_create(ethr_tid *tid, void * (*func)(void *), void *arg,
		ethr_thr_opts *opts)
{
    ethr_thr_wrap_data__ twd;
    pthread_attr_t attr;
    int res, dres;
    /* -1 means "use the system default stack size" */
    int use_stack_size = (opts && opts->suggested_stack_size >= 0
			  ? opts->suggested_stack_size
			  : -1 /* Use system default */);

#ifdef ETHR_MODIFIED_DEFAULT_STACK_SIZE
    if (use_stack_size < 0)
	use_stack_size = ETHR_MODIFIED_DEFAULT_STACK_SIZE;
#endif

#if ETHR_XCHK
    if (ethr_not_completely_inited__) {
	ETHR_ASSERT(0);
	return EACCES;
    }
    if (!tid || !func) {
	ETHR_ASSERT(0);
	return EINVAL;
    }
#endif

    /* -1 in twd.result means "child has not finished initializing" */
    ethr_atomic32_init(&twd.result, (ethr_sint32_t) -1);
    twd.thr_func = func;
    twd.arg = arg;
    twd.stacksize = 0;

    if (opts && opts->name) {
	size_t nlen = sizeof(twd.name_buff);
#ifdef __HAIKU__
	if (nlen > B_OS_NAME_LENGTH)
	    nlen = B_OS_NAME_LENGTH;
#else
	/*
	 * Length of 16 is known to work. At least pthread_setname_np()
	 * is documented to fail on too long name string, but documentation
	 * does not say what the limit is. Do not have the time to dig
	 * further into that now...
	 */
	if (nlen > 16)
	    nlen = 16;
#endif
	/* snprintf() truncates and always NUL-terminates */
	snprintf(twd.name_buff, nlen, "%s", opts->name);
	twd.name = twd.name_buff;
    } else
	twd.name = NULL;

    res = pthread_attr_init(&attr);
    if (res != 0)
	return res;

    /* Parent's ts event; the child signals it when setup is done.
       Must be released via ethr_leave_ts_event() on all paths. */
    twd.tse = ethr_get_ts_event();

    /* Error cleanup needed after this point */

    /* Schedule child thread in system scope (if possible) ... */
    res = pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
    if (res != 0 && res != ENOTSUP)
	goto error;

    if (use_stack_size >= 0) {
	size_t suggested_stack_size = (size_t) use_stack_size;
	size_t stack_size;
#ifdef ETHR_DEBUG
	suggested_stack_size /= 2; /* Make sure we got margin */
#endif
	/* ETHR_KW2B() converts the suggested size (presumably in
	   kilowords -- see macro definition) to bytes; page-align it */
	stack_size = ETHR_KW2B(suggested_stack_size);
	stack_size = ETHR_PAGE_ALIGN(stack_size);
	stack_size += ethr_pagesize__; /* For possible system usage */
#ifdef ETHR_STACK_GUARD_SIZE
	/* The guard is at least on some platforms included in the stack size
	   passed when creating threads */
	stack_size += ETHR_STACK_GUARD_SIZE;
#endif
	/* Clamp to the configured min/max stack sizes */
	if (stack_size < ethr_min_stack_size__)
	    stack_size = ethr_min_stack_size__;
	else if (stack_size > ethr_max_stack_size__)
	    stack_size = ethr_max_stack_size__;
	(void) pthread_attr_setstacksize(&attr, stack_size);
	/* Record the usable size (excluding system/guard overhead) for
	   the child's stack-limit bookkeeping in thr_wrapper() */
	twd.stacksize = stack_size;
	twd.stacksize -= ethr_pagesize__; /* For possible system usage */
#ifdef ETHR_STACK_GUARD_SIZE
	twd.stacksize -= ETHR_STACK_GUARD_SIZE;
#endif
    }

#ifdef ETHR_STACK_GUARD_SIZE
    (void) pthread_attr_setguardsize(&attr, ETHR_STACK_GUARD_SIZE);
#endif

    /* Detached or joinable... */
    res = pthread_attr_setdetachstate(&attr,
				      (opts && opts->detached
				       ? PTHREAD_CREATE_DETACHED
				       : PTHREAD_CREATE_JOINABLE));
    if (res != 0)
	goto error;

    /* Call prepare func if it exist */
    if (ethr_thr_prepare_func__)
	twd.prep_func_res = ethr_thr_prepare_func__();
    else
	twd.prep_func_res = NULL;

    res = pthread_create((pthread_t *) tid, &attr, thr_wrapper, (void*) &twd);

    if (res == 0) {
	int spin_count = child_wait_spin_count;

	/* Wait for child to initialize... */
	while (1) {
	    ethr_sint32_t result;
	    ethr_event_reset(&twd.tse->event);

	    result = ethr_atomic32_read(&twd.result);
	    if (result == 0) /* child setup succeeded */
		break;

	    if (result > 0) {
		/* child reported an error code */
		res = (int) result;
		goto error;
	    }

	    /* result still -1: child not done; spin only on the first
	       iteration, then block on the event */
	    res = ethr_event_swait(&twd.tse->event, spin_count);
	    if (res != 0 && res != EINTR)
		goto error;
	    spin_count = 0;
	}
    }

    /* Cleanup... */

 error:
    ethr_leave_ts_event(twd.tse);
    dres = pthread_attr_destroy(&attr);
    if (res == 0)
	res = dres;
    if (ethr_thr_parent_func__)
	ethr_thr_parent_func__(twd.prep_func_res);
    return res;
}
456
/* Wait for thread 'tid' to terminate; its return value is stored in
   *res when res is non-NULL. Returns 0 or a pthread_join() error. */
int
ethr_thr_join(ethr_tid tid, void **res)
{
#if ETHR_XCHK
    if (ethr_not_inited__) {
	ETHR_ASSERT(0);
	return EACCES;
    }
#endif
    return pthread_join((pthread_t) tid, res);
}
468
/* Mark thread 'tid' as detached so its resources are reclaimed
   automatically on exit. Returns 0 or a pthread_detach() error. */
int
ethr_thr_detach(ethr_tid tid)
{
#if ETHR_XCHK
    if (ethr_not_inited__) {
	ETHR_ASSERT(0);
	return EACCES;
    }
#endif
    return pthread_detach((pthread_t) tid);
}
480
/* Terminate the calling thread with return value 'res', running the
   registered exit handlers first. Does not return. */
void
ethr_thr_exit(void *res)
{
#if ETHR_XCHK
    if (ethr_not_inited__) {
	ETHR_ASSERT(0);
	return;
    }
#endif
    thr_exit_cleanup();
    pthread_exit(res);
}
493
/* Return the calling thread's identifier. */
ethr_tid
ethr_self(void)
{
    return (ethr_tid) pthread_self();
}
499
/* Fetch the name of thread 'tid' into buf (of size len).
   Returns 0 on success, a pthread error code, or ENOSYS when the
   platform provides no pthread_getname_np() variant. */
int
ethr_getname(ethr_tid tid, char *buf, size_t len)
{
#if defined(ETHR_HAVE_PTHREAD_GETNAME_NP_3)
    return pthread_getname_np((pthread_t) tid, buf, len);
#elif defined(ETHR_HAVE_PTHREAD_GETNAME_NP_2)
    /* two-argument variant (e.g. some BSDs); no length parameter */
    return pthread_getname_np((pthread_t) tid, buf);
#else
    return ENOSYS;
#endif
}
511
/* Set the calling thread's name using whichever platform API is
   available; silently does nothing when none is. Errors from the
   underlying calls are deliberately ignored (best effort). */
void
ethr_setname(char *name)
{
#if defined(ETHR_HAVE_PTHREAD_SETNAME_NP_2)
    pthread_setname_np(ethr_self(), name);
#elif defined(ETHR_HAVE_PTHREAD_SET_NAME_NP_2)
    pthread_set_name_np(ethr_self(), name);
#elif defined(ETHR_HAVE_PTHREAD_SETNAME_NP_1)
    /* one-argument variant names the calling thread only */
    pthread_setname_np(name);
#elif defined(__HAIKU__)
    thread_id haiku_tid;
    haiku_tid = get_pthread_thread_id(ethr_self());
    /* rename_thread() does not accept NULL; map it to an empty name */
    if (!name) {
        rename_thread (haiku_tid, "");
    } else {
        rename_thread (haiku_tid, name);
    }
#endif
}
531
/* Return non-zero when tid1 and tid2 identify the same thread. */
int
ethr_equal_tids(ethr_tid tid1, ethr_tid tid2)
{
    return pthread_equal((pthread_t) tid1, (pthread_t) tid2);
}
537
538 /*
539 * Thread specific events
540 */
541
/* Exported wrapper around the inline ts-event lookup. */
ethr_ts_event *
ethr_lookup_ts_event__(int busy_dup)
{
    return ethr_lookup_ts_event____(busy_dup);
}
547
/* Exported wrapper: peek at the calling thread's ts event. */
ethr_ts_event *
ethr_peek_ts_event(void)
{
    return ethr_peek_ts_event__();
}
553
/* Exported wrapper: release a ts event obtained via ethr_peek_ts_event(). */
void
ethr_unpeek_ts_event(ethr_ts_event *tsep)
{
    ethr_unpeek_ts_event__(tsep);
}
559
/* Exported wrapper: take a usage reference on 'tsep'. */
ethr_ts_event *
ethr_use_ts_event(ethr_ts_event *tsep)
{
    return ethr_use_ts_event__(tsep);
}
565
/* Exported wrapper: get the calling thread's ts event (creating it
   if needed; see the inline implementation). */
ethr_ts_event *
ethr_get_ts_event(void)
{
    return ethr_get_ts_event__();
}
571
/* Exported wrapper: release a ts event obtained via ethr_get_ts_event(). */
void
ethr_leave_ts_event(ethr_ts_event *tsep)
{
    ethr_leave_ts_event__(tsep);
}
577
578 /*
579 * Thread specific data
580 */
581
/* Create a thread-specific-data key.
   'keyname' is accepted for interface compatibility but unused in the
   pthread implementation. No destructor is registered.
   Returns 0 or a pthread_key_create() error code. */
int
ethr_tsd_key_create(ethr_tsd_key *keyp, char *keyname)
{
#if ETHR_XCHK
    if (ethr_not_inited__) {
	ETHR_ASSERT(0);
	return EACCES;
    }
    if (!keyp) {
	ETHR_ASSERT(0);
	return EINVAL;
    }
#endif
    return pthread_key_create((pthread_key_t *) keyp, NULL);
}
597
/* Delete a thread-specific-data key.
   Returns 0 or a pthread_key_delete() error code. */
int
ethr_tsd_key_delete(ethr_tsd_key key)
{
#if ETHR_XCHK
    if (ethr_not_inited__) {
	ETHR_ASSERT(0);
	return EACCES;
    }
#endif
    return pthread_key_delete((pthread_key_t) key);
}
609
/* Associate 'value' with 'key' for the calling thread.
   Returns 0 or a pthread_setspecific() error code. */
int
ethr_tsd_set(ethr_tsd_key key, void *value)
{
#if ETHR_XCHK
    if (ethr_not_inited__) {
	ETHR_ASSERT(0);
	return EACCES;
    }
#endif
    return pthread_setspecific((pthread_key_t) key, value);
}
621
/* Return the calling thread's value for 'key', or NULL if unset. */
void *
ethr_tsd_get(ethr_tsd_key key)
{
#if ETHR_XCHK
    if (ethr_not_inited__) {
	ETHR_ASSERT(0);
	return NULL;
    }
#endif
    return pthread_getspecific((pthread_key_t) key);
}
633
634 /*
635 * Signal functions
636 */
637
638 #if ETHR_HAVE_ETHR_SIG_FUNCS
639
/* Examine/change the calling thread's signal mask.
   Returns 0 on success or an error code.
   On Android sigprocmask() is used instead of pthread_sigmask() --
   NOTE(review): presumably because pthread_sigmask was unreliable or
   missing on older Android; confirm the original motivation. */
int ethr_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
#if ETHR_XCHK
    if (ethr_not_inited__) {
	ETHR_ASSERT(0);
	return EACCES;
    }
    if (!set && !oset) {
	ETHR_ASSERT(0);
	return EINVAL;
    }
#endif
#if defined(__ANDROID__)
    return sigprocmask(how, set, oset);
#else
    return pthread_sigmask(how, set, oset);
#endif
}
658
/* Wait for one of the signals in 'set' to become pending; the signal
   number is stored in *sig. Returns 0 on success or an error code. */
int ethr_sigwait(const sigset_t *set, int *sig)
{
    int res;
#if ETHR_XCHK
    if (ethr_not_inited__) {
	ETHR_ASSERT(0);
	return EACCES;
    }
    if (!set || !sig) {
	ETHR_ASSERT(0);
	return EINVAL;
    }
#endif
    /* POSIX sigwait() returns 0 on success and a positive error
       number on failure; some legacy implementations instead return
       -1 and set errno. Handle both conventions (the old code only
       handled the legacy one and dropped POSIX-style errors). */
    res = sigwait(set, sig);
    if (res < 0)
	return errno; /* legacy -1/errno convention */
    return res; /* 0 on success, error number on failure */
}
675
/* Send signal 'sig' to thread 'tid'.
   Returns 0 or a pthread_kill() error code. */
int ethr_kill(const ethr_tid tid, const int sig)
{
#if ETHR_XCHK
    if (ethr_not_inited__) {
	ETHR_ASSERT(0);
	return EACCES;
    }
#endif
    return pthread_kill((const pthread_t)tid, sig);
}
686
687 #endif /* #if ETHR_HAVE_ETHR_SIG_FUNCS */
688
689 #ifdef ETHR_HAVE_ETHR_GET_MONOTONIC_TIME
690
691 static int broken_get_monotonic_time;
692
693 #if defined(ETHR_HAVE_CLOCK_GETTIME_MONOTONIC)
694 # ifndef ETHR_MONOTONIC_CLOCK_ID
695 # error ETHR_MONOTONIC_CLOCK_ID should have been defined
696 # endif
697
698 ethr_sint64_t
ethr_get_monotonic_time(void)699 ethr_get_monotonic_time(void)
700 {
701 ethr_sint64_t time;
702 struct timespec ts;
703
704 if (broken_get_monotonic_time)
705 return (ethr_sint64_t) 0;
706
707 if (0 != clock_gettime(ETHR_MONOTONIC_CLOCK_ID, &ts))
708 ETHR_FATAL_ERROR__(errno);
709
710 time = (ethr_sint64_t) ts.tv_sec;
711 time *= (ethr_sint64_t) 1000*1000*1000;
712 time += (ethr_sint64_t) ts.tv_nsec;
713 return time;
714 }
715
716 #elif defined(ETHR_HAVE_MACH_CLOCK_GET_TIME)
717 # ifndef ETHR_MONOTONIC_CLOCK_ID
718 # error ETHR_MONOTONIC_CLOCK_ID should have been defined
719 # endif
720
/* Monotonic time in nanoseconds via Mach clock services (Darwin).
   Returns 0 when the clock has been detected as broken; aborts
   fatally if clock_get_time() fails. */
ethr_sint64_t
ethr_get_monotonic_time(void)
{
    ethr_sint64_t time;
    kern_return_t res;
    clock_serv_t clk_srv;
    mach_timespec_t time_spec;

    if (broken_get_monotonic_time)
	return (ethr_sint64_t) 0;

    /* Preset errno so that the fatal-error report below carries a
       value; the Mach calls themselves do not set errno.
       NOTE(review): the result of host_get_clock_service() is not
       checked before clk_srv is used -- confirm this is intentional. */
    errno = EFAULT;
    host_get_clock_service(mach_host_self(),
			   ETHR_MONOTONIC_CLOCK_ID,
			   &clk_srv);
    res = clock_get_time(clk_srv, &time_spec);
    if (res != KERN_SUCCESS)
	ETHR_FATAL_ERROR__(errno);
    mach_port_deallocate(mach_task_self(), clk_srv);

    /* Fold seconds and nanoseconds into a single nanosecond count */
    time = (ethr_sint64_t) time_spec.tv_sec;
    time *= (ethr_sint64_t) 1000*1000*1000;
    time += (ethr_sint64_t) time_spec.tv_nsec;
    return time;
}
746
747 #elif defined(ETHR_HAVE_GETHRTIME)
748
749 ethr_sint64_t
ethr_get_monotonic_time(void)750 ethr_get_monotonic_time(void)
751 {
752 if (broken_get_monotonic_time)
753 return (ethr_sint64_t) 0;
754 return (ethr_sint64_t) gethrtime();
755 }
756
757 #else
758 #error missing monotonic clock
759 #endif
760
/* Non-zero when the platform's monotonic clock was detected as broken
   at init time (see init_get_monotonic_time()). */
int
ethr_get_monotonic_time_is_broken(void)
{
    return broken_get_monotonic_time;
}
766
767 #include <string.h>
768 #include <ctype.h>
769 #include <sys/utsname.h>
770
771 static void
init_get_monotonic_time(void)772 init_get_monotonic_time(void)
773 {
774 struct utsname uts;
775 int vsn[3];
776 int i;
777 char *c;
778
779 broken_get_monotonic_time = 0;
780
781 (void) uname(&uts);
782
783 for (c = uts.sysname; *c; c++) {
784 if (isupper((int) *c))
785 *c = tolower((int) *c);
786 }
787
788 c = uts.release;
789 for (i = 0; i < sizeof(vsn)/sizeof(int); i++) {
790 if (!isdigit((int) *c))
791 vsn[i] = 0;
792 else {
793 char *c2 = c;
794 do {
795 c2++;
796 } while (isdigit((int) *c2));
797 *c2 = '\0';
798 vsn[i] = atoi(c);
799 c = c2;
800 c++;
801 }
802 }
803
804 if (strcmp("linux", uts.sysname) == 0) {
805 if (vsn[0] < 2
806 || (vsn[0] == 2 && vsn[1] < 6)
807 || (vsn[0] == 2 && vsn[1] == 6 && vsn[2] < 33)) {
808 broken_get_monotonic_time = 1;
809 }
810 }
811 else if (strcmp("sunos", uts.sysname) == 0) {
812 if ((vsn[0] < 5
813 || (vsn[0] == 5 && vsn[1] < 8))
814 #if defined(HAVE_SYSCONF) && defined(_SC_NPROCESSORS_CONF)
815 && sysconf(_SC_NPROCESSORS_CONF) > 1
816 #endif
817 ) {
818 broken_get_monotonic_time = 1;
819 }
820 }
821
822 }
823
824
825 #endif /* ETHR_HAVE_ETHR_GET_MONOTONIC_TIME */
826
/* Abort the emulator; used by fatal-error paths. Never returns. */
ETHR_IMPL_NORETURN__
ethr_abort__(void)
{
    abort();
}
832