1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3  * License, v. 2.0. If a copy of the MPL was not distributed with this
4  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5 
6 #include "primpl.h"
7 #include <sys/types.h>
8 #include <unistd.h>
9 #include <signal.h>
10 #include <pthread.h>
11 
12 
/* Signal mask applied while virtual "interrupts" are disabled. */
sigset_t ints_off;
/* Protects the NSPR heap allocator; initialized in _MD_InitLocks. */
pthread_mutex_t _pr_heapLock;
/* Thread-specific-data keys used by the user-level scheduler. */
pthread_key_t current_thread_key;
pthread_key_t current_cpu_key;
pthread_key_t last_thread_key;
pthread_key_t intsoff_key;


/* Bookkeeping counters for native pthread creation (see _MD_CreateThread). */
PRInt32 _pr_md_pthreads_created, _pr_md_pthreads_failed;
PRInt32 _pr_md_pthreads = 1;
23 
24 void _MD_EarlyInit(void)
25 {
26     extern PRInt32 _nspr_noclock;
27 
28     if (pthread_key_create(&current_thread_key, NULL) != 0) {
29         perror("pthread_key_create failed");
30         exit(1);
31     }
32     if (pthread_key_create(&current_cpu_key, NULL) != 0) {
33         perror("pthread_key_create failed");
34         exit(1);
35     }
36     if (pthread_key_create(&last_thread_key, NULL) != 0) {
37         perror("pthread_key_create failed");
38         exit(1);
39     }
40     if (pthread_key_create(&intsoff_key, NULL) != 0) {
41         perror("pthread_key_create failed");
42         exit(1);
43     }
44 
45     sigemptyset(&ints_off);
46     sigaddset(&ints_off, SIGALRM);
47     sigaddset(&ints_off, SIGIO);
48     sigaddset(&ints_off, SIGCLD);
49 
50     /*
51      * disable clock interrupts
52      */
53     _nspr_noclock = 1;
54 
55 }
56 
57 void _MD_InitLocks()
58 {
59     if (pthread_mutex_init(&_pr_heapLock, NULL) != 0) {
60         perror("pthread_mutex_init failed");
61         exit(1);
62     }
63 }
64 
65 PR_IMPLEMENT(void) _MD_FREE_LOCK(struct _MDLock *lockp)
66 {
67     PRIntn _is;
68     PRThread *me = _PR_MD_CURRENT_THREAD();
69 
70     if (me && !_PR_IS_NATIVE_THREAD(me)) {
71         _PR_INTSOFF(_is);
72     }
73     pthread_mutex_destroy(&lockp->mutex);
74     if (me && !_PR_IS_NATIVE_THREAD(me)) {
75         _PR_FAST_INTSON(_is);
76     }
77 }
78 
79 
80 
81 PR_IMPLEMENT(PRStatus) _MD_NEW_LOCK(struct _MDLock *lockp)
82 {
83     PRStatus rv;
84     PRIntn is;
85     PRThread *me = _PR_MD_CURRENT_THREAD();
86 
87     if (me && !_PR_IS_NATIVE_THREAD(me)) {
88         _PR_INTSOFF(is);
89     }
90     rv = pthread_mutex_init(&lockp->mutex, NULL);
91     if (me && !_PR_IS_NATIVE_THREAD(me)) {
92         _PR_FAST_INTSON(is);
93     }
94     return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
95 }
96 
97 
/*
 * Return a thread's saved register context so the GC can scan it for
 * roots.  For the current thread the registers are captured right here
 * via setjmp; for other threads the CONTEXT buffer presumably holds the
 * state saved at their last context switch — TODO confirm against the
 * CONTEXT macro's definition in the md headers.
 */
PRWord *_MD_HomeGCRegisters(PRThread *t, int isCurrent, int *np)
{
    if (isCurrent) {
        (void) setjmp(CONTEXT(t));
    }
    /* Report the buffer size to the caller in units of PRWord. */
    *np = sizeof(CONTEXT(t)) / sizeof(PRWord);
    return (PRWord *) CONTEXT(t);
}
106 
107 PR_IMPLEMENT(void)
108 _MD_SetPriority(_MDThread *thread, PRThreadPriority newPri)
109 {
110     /*
111      * XXX - to be implemented
112      */
113     return;
114 }
115 
116 PR_IMPLEMENT(PRStatus) _MD_InitThread(struct PRThread *thread)
117 {
118     struct sigaction sigact;
119 
120     if (thread->flags & _PR_GLOBAL_SCOPE) {
121         thread->md.pthread = pthread_self();
122 #if 0
123         /*
124          * set up SIGUSR1 handler; this is used to save state
125          * during PR_SuspendAll
126          */
127         sigact.sa_handler = save_context_and_block;
128         sigact.sa_flags = SA_RESTART;
129         /*
130          * Must mask clock interrupts
131          */
132         sigact.sa_mask = timer_set;
133         sigaction(SIGUSR1, &sigact, 0);
134 #endif
135     }
136 
137     return PR_SUCCESS;
138 }
139 
140 PR_IMPLEMENT(void) _MD_ExitThread(struct PRThread *thread)
141 {
142     if (thread->flags & _PR_GLOBAL_SCOPE) {
143         _MD_CLEAN_THREAD(thread);
144         _MD_SET_CURRENT_THREAD(NULL);
145     }
146 }
147 
148 PR_IMPLEMENT(void) _MD_CleanThread(struct PRThread *thread)
149 {
150     if (thread->flags & _PR_GLOBAL_SCOPE) {
151         pthread_mutex_destroy(&thread->md.pthread_mutex);
152         pthread_cond_destroy(&thread->md.pthread_cond);
153     }
154 }
155 
156 PR_IMPLEMENT(void) _MD_SuspendThread(struct PRThread *thread)
157 {
158     PRInt32 rv;
159 
160     PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
161               _PR_IS_GCABLE_THREAD(thread));
162 #if 0
163     thread->md.suspending_id = getpid();
164     rv = kill(thread->md.id, SIGUSR1);
165     PR_ASSERT(rv == 0);
166     /*
167      * now, block the current thread/cpu until woken up by the suspended
168      * thread from it's SIGUSR1 signal handler
169      */
170     blockproc(getpid());
171 #endif
172 }
173 
174 PR_IMPLEMENT(void) _MD_ResumeThread(struct PRThread *thread)
175 {
176     PRInt32 rv;
177 
178     PR_ASSERT((thread->flags & _PR_GLOBAL_SCOPE) &&
179               _PR_IS_GCABLE_THREAD(thread));
180 #if 0
181     rv = unblockproc(thread->md.id);
182 #endif
183 }
184 
185 PR_IMPLEMENT(void) _MD_SuspendCPU(struct _PRCPU *thread)
186 {
187     PRInt32 rv;
188 
189 #if 0
190     cpu->md.suspending_id = getpid();
191     rv = kill(cpu->md.id, SIGUSR1);
192     PR_ASSERT(rv == 0);
193     /*
194      * now, block the current thread/cpu until woken up by the suspended
195      * thread from it's SIGUSR1 signal handler
196      */
197     blockproc(getpid());
198 #endif
199 }
200 
/*
 * Resume a suspended CPU.  Not implemented on AIX; the disabled code is
 * an Irix-era leftover (and references `cpu`, not the `thread` parameter).
 */
PR_IMPLEMENT(void) _MD_ResumeCPU(struct _PRCPU *thread)
{
#if 0
    unblockproc(cpu->md.id);
#endif
}
207 
208 
#define PT_NANOPERMICRO 1000UL
#define PT_BILLION 1000000000UL

/*
 * Block a global-scope thread on its per-thread condition variable until
 * woken by _MD_WakeupWaiter or until `timeout` (an interval in NSPR
 * ticks) expires.  Returns PR_SUCCESS if woken, PR_FAILURE on timeout
 * or condvar error.
 *
 * thread->md.wait is a wakeup counter protected by md.pthread_mutex:
 * a pending wakeup leaves it >= 0 after the decrement, in which case we
 * consume it and return without blocking.
 */
PR_IMPLEMENT(PRStatus)
_pt_wait(PRThread *thread, PRIntervalTime timeout)
{
    int rv;
    struct timeval now;
    struct timespec tmo;
    PRUint32 ticks = PR_TicksPerSecond();


    if (timeout != PR_INTERVAL_NO_TIMEOUT) {
        /* Split the interval into whole seconds and a sub-second
         * remainder (still in ticks), then convert the remainder to
         * nanoseconds via microseconds. */
        tmo.tv_sec = timeout / ticks;
        tmo.tv_nsec = timeout - (tmo.tv_sec * ticks);
        tmo.tv_nsec = PR_IntervalToMicroseconds(PT_NANOPERMICRO *
                                                tmo.tv_nsec);

        /* pthreads wants this in absolute time, off we go ... */
        (void)GETTIMEOFDAY(&now);
        /* that one's usecs, this one's nsecs - grrrr! */
        tmo.tv_sec += now.tv_sec;
        tmo.tv_nsec += (PT_NANOPERMICRO * now.tv_usec);
        /* Normalize so tv_nsec stays below one second. */
        tmo.tv_sec += tmo.tv_nsec / PT_BILLION;
        tmo.tv_nsec %= PT_BILLION;
    }

    pthread_mutex_lock(&thread->md.pthread_mutex);
    thread->md.wait--;
    if (thread->md.wait < 0) {
        /* No wakeup pending: block (with deadline if one was given). */
        if (timeout != PR_INTERVAL_NO_TIMEOUT) {
            rv = pthread_cond_timedwait(&thread->md.pthread_cond,
                                        &thread->md.pthread_mutex, &tmo);
        }
        else
            rv = pthread_cond_wait(&thread->md.pthread_cond,
                                   &thread->md.pthread_mutex);
        if (rv != 0) {
            /* Timed out (or error): undo our decrement since no wakeup
             * was consumed. */
            thread->md.wait++;
        }
    } else {
        /* A wakeup was already pending; consume it without blocking. */
        rv = 0;
    }
    pthread_mutex_unlock(&thread->md.pthread_mutex);

    return (rv == 0) ? PR_SUCCESS : PR_FAILURE;
}
256 
/*
 * Machine-dependent wait: global-scope threads block in _pt_wait on
 * their native condvar; local threads simply switch context back to the
 * user-level scheduler.  Always returns PR_SUCCESS.
 */
PR_IMPLEMENT(PRStatus)
_MD_wait(PRThread *thread, PRIntervalTime ticks)
{
    if ( thread->flags & _PR_GLOBAL_SCOPE ) {
        _MD_CHECK_FOR_EXIT();
        if (_pt_wait(thread, ticks) == PR_FAILURE) {
            _MD_CHECK_FOR_EXIT();
            /*
             * wait timed out
             */
            _PR_THREAD_LOCK(thread);
            if (thread->wait.cvar) {
                /*
                 * The thread will remove itself from the waitQ
                 * of the cvar in _PR_WaitCondVar
                 */
                thread->wait.cvar = NULL;
                thread->state =  _PR_RUNNING;
                _PR_THREAD_UNLOCK(thread);
            }  else {
                /* cvar already cleared by a waker: a wakeup is (or will
                 * be) pending, so wait again — this time indefinitely —
                 * to consume it before returning. */
                _pt_wait(thread, PR_INTERVAL_NO_TIMEOUT);
                _PR_THREAD_UNLOCK(thread);
            }
        }
    } else {
        _PR_MD_SWITCH_CONTEXT(thread);
    }
    return PR_SUCCESS;
}
286 
287 PR_IMPLEMENT(PRStatus)
288 _MD_WakeupWaiter(PRThread *thread)
289 {
290     PRThread *me = _PR_MD_CURRENT_THREAD();
291     PRInt32 pid, rv;
292     PRIntn is;
293 
294     PR_ASSERT(_pr_md_idle_cpus >= 0);
295     if (thread == NULL) {
296         if (_pr_md_idle_cpus) {
297             _MD_Wakeup_CPUs();
298         }
299     } else if (!_PR_IS_NATIVE_THREAD(thread)) {
300         /*
301          * If the thread is on my cpu's runq there is no need to
302          * wakeup any cpus
303          */
304         if (!_PR_IS_NATIVE_THREAD(me)) {
305             if (me->cpu != thread->cpu) {
306                 if (_pr_md_idle_cpus) {
307                     _MD_Wakeup_CPUs();
308                 }
309             }
310         } else {
311             if (_pr_md_idle_cpus) {
312                 _MD_Wakeup_CPUs();
313             }
314         }
315     } else {
316         PR_ASSERT(_PR_IS_NATIVE_THREAD(thread));
317         if (!_PR_IS_NATIVE_THREAD(me)) {
318             _PR_INTSOFF(is);
319         }
320 
321         pthread_mutex_lock(&thread->md.pthread_mutex);
322         thread->md.wait++;
323         rv = pthread_cond_signal(&thread->md.pthread_cond);
324         PR_ASSERT(rv == 0);
325         pthread_mutex_unlock(&thread->md.pthread_mutex);
326 
327         if (!_PR_IS_NATIVE_THREAD(me)) {
328             _PR_FAST_INTSON(is);
329         }
330     }
331     return PR_SUCCESS;
332 }
333 
334 /* These functions should not be called for AIX */
335 PR_IMPLEMENT(void)
336 _MD_YIELD(void)
337 {
338     PR_NOT_REACHED("_MD_YIELD should not be called for AIX.");
339 }
340 
341 PR_IMPLEMENT(PRStatus)
342 _MD_CreateThread(
343     PRThread *thread,
344     void (*start) (void *),
345     PRThreadPriority priority,
346     PRThreadScope scope,
347     PRThreadState state,
348     PRUint32 stackSize)
349 {
350     PRIntn is;
351     int rv;
352     PRThread *me = _PR_MD_CURRENT_THREAD();
353     pthread_attr_t attr;
354 
355     if (!_PR_IS_NATIVE_THREAD(me)) {
356         _PR_INTSOFF(is);
357     }
358 
359     if (pthread_mutex_init(&thread->md.pthread_mutex, NULL) != 0) {
360         if (!_PR_IS_NATIVE_THREAD(me)) {
361             _PR_FAST_INTSON(is);
362         }
363         return PR_FAILURE;
364     }
365 
366     if (pthread_cond_init(&thread->md.pthread_cond, NULL) != 0) {
367         pthread_mutex_destroy(&thread->md.pthread_mutex);
368         if (!_PR_IS_NATIVE_THREAD(me)) {
369             _PR_FAST_INTSON(is);
370         }
371         return PR_FAILURE;
372     }
373     thread->flags |= _PR_GLOBAL_SCOPE;
374 
375     pthread_attr_init(&attr); /* initialize attr with default attributes */
376     if (pthread_attr_setstacksize(&attr, (size_t) stackSize) != 0) {
377         pthread_mutex_destroy(&thread->md.pthread_mutex);
378         pthread_cond_destroy(&thread->md.pthread_cond);
379         pthread_attr_destroy(&attr);
380         if (!_PR_IS_NATIVE_THREAD(me)) {
381             _PR_FAST_INTSON(is);
382         }
383         return PR_FAILURE;
384     }
385 
386     thread->md.wait = 0;
387     rv = pthread_create(&thread->md.pthread, &attr, start, (void *)thread);
388     if (0 == rv) {
389         _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_created);
390         _MD_ATOMIC_INCREMENT(&_pr_md_pthreads);
391         if (!_PR_IS_NATIVE_THREAD(me)) {
392             _PR_FAST_INTSON(is);
393         }
394         return PR_SUCCESS;
395     } else {
396         pthread_mutex_destroy(&thread->md.pthread_mutex);
397         pthread_cond_destroy(&thread->md.pthread_cond);
398         pthread_attr_destroy(&attr);
399         _MD_ATOMIC_INCREMENT(&_pr_md_pthreads_failed);
400         if (!_PR_IS_NATIVE_THREAD(me)) {
401             _PR_FAST_INTSON(is);
402         }
403         PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, rv);
404         return PR_FAILURE;
405     }
406 }
407 
/*
 * Machine-dependent setup for a CPU structure that is already running on
 * the calling pthread: record the pthread identity and register the read
 * end of the wakeup pipe with the CPU's I/O queue so the idle loop can be
 * interrupted.
 */
PR_IMPLEMENT(void)
_MD_InitRunningCPU(struct _PRCPU *cpu)
{
    extern int _pr_md_pipefd[2];

    _MD_unix_init_running_cpu(cpu);
    cpu->md.pthread = pthread_self();
    if (_pr_md_pipefd[0] >= 0) {
        /* Track the pipe fd as this CPU's highest select()/poll() fd. */
        _PR_IOQ_MAX_OSFD(cpu) = _pr_md_pipefd[0];
#ifndef _PR_USE_POLL
        FD_SET(_pr_md_pipefd[0], &_PR_FD_READ_SET(cpu));
#endif
    }
}
422 
423 
/*
 * Pre-exit cleanup hook.  A no-op on AIX: the disabled block below is
 * leftover Irix code (note the _pr_irix_* names) that shut down extra
 * CPUs and flushed the dead-native-thread recycle list before exit.
 */
void
_MD_CleanupBeforeExit(void)
{
#if 0
    extern PRInt32    _pr_cpus_exit;

    _pr_irix_exit_now = 1;
    if (_pr_numCPU > 1) {
        /*
         * Set a global flag, and wakeup all cpus which will notice the flag
         * and exit.
         */
        _pr_cpus_exit = getpid();
        _MD_Wakeup_CPUs();
        while(_pr_numCPU > 1) {
            _PR_WAIT_SEM(_pr_irix_exit_sem);
            _pr_numCPU--;
        }
    }
    /*
     * cause global threads on the recycle list to exit
     */
    _PR_DEADQ_LOCK;
    if (_PR_NUM_DEADNATIVE != 0) {
        PRThread *thread;
        PRCList *ptr;

        ptr = _PR_DEADNATIVEQ.next;
        while( ptr != &_PR_DEADNATIVEQ ) {
            thread = _PR_THREAD_PTR(ptr);
            _MD_CVAR_POST_SEM(thread);
            ptr = ptr->next;
        }
    }
    _PR_DEADQ_UNLOCK;
    while(_PR_NUM_DEADNATIVE > 1) {
        _PR_WAIT_SEM(_pr_irix_exit_sem);
        _PR_DEC_DEADNATIVE;
    }
#endif
}
465