1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5
6 #include "primpl.h"
7 #include <signal.h>
8 #include <string.h>
9
10 #if defined(WIN95)
11 /*
12 ** Some local variables report warnings on Win95 because the code paths
** using them are conditioned on HAVE_CUSTOM_USER_THREADS.
14 ** The pragma suppresses the warning.
15 **
16 */
17 #pragma warning(disable : 4101)
18 #endif
19
20 /* _pr_activeLock protects the following global variables */
21 PRLock *_pr_activeLock;
22 PRInt32 _pr_primordialExitCount; /* In PR_Cleanup(), the primordial thread
23 * waits until all other user (non-system)
24 * threads have terminated before it exits.
25 * So whenever we decrement _pr_userActive,
26 * it is compared with
27 * _pr_primordialExitCount.
28 * If the primordial thread is a system
29 * thread, then _pr_primordialExitCount
30 * is 0. If the primordial thread is
31 * itself a user thread, then
* _pr_primordialExitCount is 1.
33 */
34 PRCondVar *_pr_primordialExitCVar; /* When _pr_userActive is decremented to
35 * _pr_primordialExitCount, this condition
36 * variable is notified.
37 */
38
39 PRLock *_pr_deadQLock;
40 PRUint32 _pr_numNativeDead;
41 PRUint32 _pr_numUserDead;
42 PRCList _pr_deadNativeQ;
43 PRCList _pr_deadUserQ;
44
45 PRUint32 _pr_join_counter;
46
47 PRUint32 _pr_local_threads;
48 PRUint32 _pr_global_threads;
49
50 PRBool suspendAllOn = PR_FALSE;
51 PRThread *suspendAllThread = NULL;
52
53 extern PRCList _pr_active_global_threadQ;
54 extern PRCList _pr_active_local_threadQ;
55
56 static void _PR_DecrActiveThreadCount(PRThread *thread);
57 static PRThread *_PR_AttachThread(PRThreadType, PRThreadPriority, PRThreadStack *);
58 static void _PR_InitializeNativeStack(PRThreadStack *ts);
59 static void _PR_InitializeRecycledThread(PRThread *thread);
60 static void _PR_UserRunThread(void);
61
/*
** Set up the threading subsystem and attach the primordial thread.
**
** type     - whether the primordial thread counts as a user or system
**            thread (controls the PR_Cleanup() exit accounting).
** priority - must be PR_PRIORITY_NORMAL (asserted below).
** maxPTDs  - unused here; retained for interface compatibility.
**
** Creates the global locks, builds a PRThreadStack describing the
** already-existing primordial stack, attaches the current thread,
** registers it on the appropriate active-thread queue, and initializes
** the dead-thread (recycling) queues.  Aborts the process if the
** primordial thread cannot be attached.
*/
void _PR_InitThreads(PRThreadType type, PRThreadPriority priority,
                     PRUintn maxPTDs)
{
    PRThread *thread;
    PRThreadStack *stack;

    PR_ASSERT(priority == PR_PRIORITY_NORMAL);

    _pr_terminationCVLock = PR_NewLock();
    _pr_activeLock = PR_NewLock();

#ifndef HAVE_CUSTOM_USER_THREADS
    stack = PR_NEWZAP(PRThreadStack);
#ifdef HAVE_STACK_GROWING_UP
    /* Page-align the address of a local to approximate the stack top. */
    stack->stackTop = (char*) ((((PRWord)&type) >> _pr_pageShift)
                               << _pr_pageShift);
#else
#if defined(SOLARIS) || defined (UNIXWARE) && defined (USR_SVR4_THREADS)
    stack->stackTop = (char*) &thread;
#else
    /* Stack grows down: round the local's address up to a page boundary. */
    stack->stackTop = (char*) ((((PRWord)&type + _pr_pageSize - 1)
                                >> _pr_pageShift) << _pr_pageShift);
#endif
#endif
#else
    /* If stack is NULL, we're using custom user threads like NT fibers. */
    stack = PR_NEWZAP(PRThreadStack);
    if (stack) {
        stack->stackSize = 0;
        _PR_InitializeNativeStack(stack);
    }
#endif /* HAVE_CUSTOM_USER_THREADS */

    thread = _PR_AttachThread(type, priority, stack);
    if (thread) {
        _PR_MD_SET_CURRENT_THREAD(thread);

        if (type == PR_SYSTEM_THREAD) {
            thread->flags = _PR_SYSTEM;
            _pr_systemActive++;
            /* A system primordial thread does not hold up PR_Cleanup(). */
            _pr_primordialExitCount = 0;
        } else {
            _pr_userActive++;
            /* PR_Cleanup() waits until only this user thread remains. */
            _pr_primordialExitCount = 1;
        }
        thread->no_sched = 1;
        _pr_primordialExitCVar = PR_NewCondVar(_pr_activeLock);
    }

    if (!thread) {
        PR_Abort();
    }
#ifdef _PR_LOCAL_THREADS_ONLY
    thread->flags |= _PR_PRIMORDIAL;
#else
    thread->flags |= _PR_PRIMORDIAL | _PR_GLOBAL_SCOPE;
#endif

    /*
     * Needs _PR_PRIMORDIAL flag set before calling
     * _PR_MD_INIT_THREAD()
     */
    if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
        /*
         * XXX do what?  (Failure here is silently ignored.)
         */
    }

    /* Register the primordial thread on the matching active queue. */
    if (_PR_IS_NATIVE_THREAD(thread)) {
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
        _pr_global_threads++;
    } else {
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
        _pr_local_threads++;
    }

    /* Thread-recycling state: queues of dead threads awaiting reuse. */
    _pr_recycleThreads = 0;
    _pr_deadQLock = PR_NewLock();
    _pr_numNativeDead = 0;
    _pr_numUserDead = 0;
    PR_INIT_CLIST(&_pr_deadNativeQ);
    PR_INIT_CLIST(&_pr_deadUserQ);
}
145
_PR_CleanupThreads(void)146 void _PR_CleanupThreads(void)
147 {
148 if (_pr_terminationCVLock) {
149 PR_DestroyLock(_pr_terminationCVLock);
150 _pr_terminationCVLock = NULL;
151 }
152 if (_pr_activeLock) {
153 PR_DestroyLock(_pr_activeLock);
154 _pr_activeLock = NULL;
155 }
156 if (_pr_primordialExitCVar) {
157 PR_DestroyCondVar(_pr_primordialExitCVar);
158 _pr_primordialExitCVar = NULL;
159 }
160 /* TODO _pr_dead{Native,User}Q need to be deleted */
161 if (_pr_deadQLock) {
162 PR_DestroyLock(_pr_deadQLock);
163 _pr_deadQLock = NULL;
164 }
165 }
166
/*
** Initialize a stack for a native thread.
**
** Fills in a PRThreadStack for a thread whose stack already exists
** (created by the OS rather than by NSPR): records the allocation size
** and derives page-aligned stackTop/stackBottom from the address of a
** local variable, i.e. from the currently executing stack.  Must
** therefore run on the thread that owns the stack.  Skipped when
** stackTop is already set, so it runs at most once per stack.
*/
static void _PR_InitializeNativeStack(PRThreadStack *ts)
{
    if( ts && (ts->stackTop == 0) ) {
        ts->allocSize = ts->stackSize;

        /*
        ** Setup stackTop and stackBottom values.
        */
#ifdef HAVE_STACK_GROWING_UP
        /* &ts is on the current stack; round down to a page boundary. */
        ts->allocBase = (char*) ((((PRWord)&ts) >> _pr_pageShift)
                                 << _pr_pageShift);
        ts->stackBottom = ts->allocBase + ts->stackSize;
        ts->stackTop = ts->allocBase;
#else
        /* Stack grows down: round up, the bottom lies below the base. */
        ts->allocBase = (char*) ((((PRWord)&ts + _pr_pageSize - 1)
                                  >> _pr_pageShift) << _pr_pageShift);
        ts->stackTop = ts->allocBase;
        ts->stackBottom = ts->allocBase - ts->stackSize;
#endif
    }
}
191
/*
** Called by a terminating thread on itself (thread == current thread).
** If the thread is joinable (it has a termination condition variable),
** publish its termination to any thread blocked in PR_JoinThread() and
** then block until the joiner has reaped us.
*/
void _PR_NotifyJoinWaiters(PRThread *thread)
{
    /*
    ** Handle joinable threads.  Change the state to waiting for join.
    ** Remove from our run Q and put it on global waiting to join Q.
    ** Notify on our "termination" condition variable so that joining
    ** thread will know about our termination.  Switch our context and
    ** come back later on to continue the cleanup.
    */
    PR_ASSERT(thread == _PR_MD_CURRENT_THREAD());
    if (thread->term != NULL) {
        /* Lock order: termination CV lock first, then the thread lock. */
        PR_Lock(_pr_terminationCVLock);
        _PR_THREAD_LOCK(thread);
        thread->state = _PR_JOIN_WAIT;
        if ( !_PR_IS_NATIVE_THREAD(thread) ) {
            /* Local threads park on the per-CPU join queue. */
            _PR_MISCQ_LOCK(thread->cpu);
            _PR_ADD_JOINQ(thread, thread->cpu);
            _PR_MISCQ_UNLOCK(thread->cpu);
        }
        _PR_THREAD_UNLOCK(thread);
        PR_NotifyCondVar(thread->term);
        PR_Unlock(_pr_terminationCVLock);
        /* Sleep until the joiner wakes us; it must reset our state. */
        _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
        PR_ASSERT(thread->state != _PR_JOIN_WAIT);
    }

}
219
220 /*
221 * Zero some of the data members of a recycled thread.
222 *
223 * Note that we can do this either when a dead thread is added to
224 * the dead thread queue or when it is reused. Here, we are doing
225 * this lazily, when the thread is reused in _PR_CreateThread().
226 */
_PR_InitializeRecycledThread(PRThread * thread)227 static void _PR_InitializeRecycledThread(PRThread *thread)
228 {
229 /*
230 * Assert that the following data members are already zeroed
231 * by _PR_CleanupThread().
232 */
233 #ifdef DEBUG
234 if (thread->privateData) {
235 unsigned int i;
236 for (i = 0; i < thread->tpdLength; i++) {
237 PR_ASSERT(thread->privateData[i] == NULL);
238 }
239 }
240 #endif
241 PR_ASSERT(thread->dumpArg == 0 && thread->dump == 0);
242 PR_ASSERT(thread->errorString == 0 && thread->errorStringSize == 0);
243 PR_ASSERT(thread->errorStringLength == 0);
244 PR_ASSERT(thread->name == 0);
245
246 /* Reset data members in thread structure */
247 thread->errorCode = thread->osErrorCode = 0;
248 thread->io_pending = thread->io_suspended = PR_FALSE;
249 thread->environment = 0;
250 PR_INIT_CLIST(&thread->lockList);
251 }
252
_PR_RecycleThread(PRThread * thread)253 PRStatus _PR_RecycleThread(PRThread *thread)
254 {
255 if ( _PR_IS_NATIVE_THREAD(thread) &&
256 _PR_NUM_DEADNATIVE < _pr_recycleThreads) {
257 _PR_DEADQ_LOCK;
258 PR_APPEND_LINK(&thread->links, &_PR_DEADNATIVEQ);
259 _PR_INC_DEADNATIVE;
260 _PR_DEADQ_UNLOCK;
261 return (PR_SUCCESS);
262 } else if ( !_PR_IS_NATIVE_THREAD(thread) &&
263 _PR_NUM_DEADUSER < _pr_recycleThreads) {
264 _PR_DEADQ_LOCK;
265 PR_APPEND_LINK(&thread->links, &_PR_DEADUSERQ);
266 _PR_INC_DEADUSER;
267 _PR_DEADQ_UNLOCK;
268 return (PR_SUCCESS);
269 }
270 return (PR_FAILURE);
271 }
272
273 /*
274 * Decrement the active thread count, either _pr_systemActive or
275 * _pr_userActive, depending on whether the thread is a system thread
276 * or a user thread. If all the user threads, except possibly
277 * the primordial thread, have terminated, we notify the primordial
278 * thread of this condition.
279 *
280 * Since this function will lock _pr_activeLock, do not call this
281 * function while holding the _pr_activeLock lock, as this will result
282 * in a deadlock.
283 */
284
285 static void
_PR_DecrActiveThreadCount(PRThread * thread)286 _PR_DecrActiveThreadCount(PRThread *thread)
287 {
288 PR_Lock(_pr_activeLock);
289 if (thread->flags & _PR_SYSTEM) {
290 _pr_systemActive--;
291 } else {
292 _pr_userActive--;
293 if (_pr_userActive == _pr_primordialExitCount) {
294 PR_NotifyCondVar(_pr_primordialExitCVar);
295 }
296 }
297 PR_Unlock(_pr_activeLock);
298 }
299
/*
** Free the machine-dependent lock embedded in the thread, then the
** thread object itself.  The caller must guarantee that nothing can
** reference |thread| afterwards.
*/
static void
_PR_DestroyThread(PRThread *thread)
{
    _PR_MD_FREE_LOCK(&thread->threadLock);
    PR_DELETE(thread);
}
309
310 void
_PR_NativeDestroyThread(PRThread * thread)311 _PR_NativeDestroyThread(PRThread *thread)
312 {
313 if(thread->term) {
314 PR_DestroyCondVar(thread->term);
315 thread->term = 0;
316 }
317 if (NULL != thread->privateData) {
318 PR_ASSERT(0 != thread->tpdLength);
319 PR_DELETE(thread->privateData);
320 thread->tpdLength = 0;
321 }
322 PR_DELETE(thread->stack);
323 _PR_DestroyThread(thread);
324 }
325
/*
** Release the resources of a local (user-level) thread that is not
** being recycled.  Frees the termination CV and thread-private-data
** table, destroys the MD lock, and releases the stack.  When the
** PRThread was allocated on its own stack, freeing the stack also
** frees the thread object; only the primordial thread (and, on NT,
** fibers) takes the else path.
*/
void
_PR_UserDestroyThread(PRThread *thread)
{
    if(thread->term) {
        PR_DestroyCondVar(thread->term);
        thread->term = 0;
    }
    if (NULL != thread->privateData) {
        /* A non-NULL table implies at least one allocated slot. */
        PR_ASSERT(0 != thread->tpdLength);
        PR_DELETE(thread->privateData);
        thread->tpdLength = 0;
    }
    _PR_MD_FREE_LOCK(&thread->threadLock);
    if (thread->threadAllocatedOnStack == 1) {
        _PR_MD_CLEAN_THREAD(thread);
        /*
         * Because the no_sched field is set, this thread/stack will
         * not be re-used until the flag is cleared by the thread
         * we will context switch to.
         */
        _PR_FreeStack(thread->stack);
    } else {
#ifdef WINNT
        _PR_MD_CLEAN_THREAD(thread);
#else
        /*
         * This assertion does not apply to NT. On NT, every fiber
         * has its threadAllocatedOnStack equal to 0. Elsewhere,
         * only the primordial thread has its threadAllocatedOnStack
         * equal to 0.
         */
        PR_ASSERT(thread->flags & _PR_PRIMORDIAL);
#endif
    }
}
361
362
/*
** Run a thread's start function. When the start function returns the
** thread is done executing and no longer needs the CPU. If there are no
** more user threads running then we can exit the program.
**
** This is the OS-level entry point for a native (global-scope) thread;
** |arg| is the PRThread created by _PR_NativeCreateThread.  The body
** loops so that a recycled thread can be handed a new start function
** and woken via _PR_MD_WAKEUP_WAITER without re-creating the OS thread.
*/
void _PR_NativeRunThread(void *arg)
{
    PRThread *thread = (PRThread *)arg;

    _PR_MD_SET_CURRENT_THREAD(thread);

    /* Native threads are not bound to an NSPR virtual CPU. */
    _PR_MD_SET_CURRENT_CPU(NULL);

    /* Set up the thread stack information */
    _PR_InitializeNativeStack(thread->stack);

    /* Set up the thread md information */
    if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
        /*
         * thread failed to initialize itself, possibly due to
         * failure to allocate per-thread resources
         */
        return;
    }

    while(1) {
        thread->state = _PR_RUNNING;

        /*
         * Add to list of active threads
         */
        PR_Lock(_pr_activeLock);
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
        _pr_global_threads++;
        PR_Unlock(_pr_activeLock);

        /* Run the user-supplied thread main. */
        (*thread->startFunc)(thread->arg);

        /*
         * The following two assertions are meant for NT asynch io.
         *
         * The thread should have no asynch io in progress when it
         * exits, otherwise the overlapped buffer, which is part of
         * the thread structure, would become invalid.
         */
        PR_ASSERT(thread->io_pending == PR_FALSE);
        /*
         * This assertion enforces the programming guideline that
         * if an io function times out or is interrupted, the thread
         * should close the fd to force the asynch io to abort
         * before it exits. Right now, closing the fd is the only
         * way to clear the io_suspended flag.
         */
        PR_ASSERT(thread->io_suspended == PR_FALSE);

        /*
         * remove thread from list of active threads
         */
        PR_Lock(_pr_activeLock);
        PR_REMOVE_LINK(&thread->active);
        _pr_global_threads--;
        PR_Unlock(_pr_activeLock);

        PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));

        /* All done, time to go away */
        _PR_CleanupThread(thread);

        /* Wake any thread blocked in PR_JoinThread() on us. */
        _PR_NotifyJoinWaiters(thread);

        _PR_DecrActiveThreadCount(thread);

        thread->state = _PR_DEAD_STATE;

        if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
                                    PR_FAILURE)) {
            /*
             * thread not recycled
             * platform-specific thread exit processing
             * - for stuff like releasing native-thread resources, etc.
             */
            _PR_MD_EXIT_THREAD(thread);
            /*
             * Free memory allocated for the thread
             */
            _PR_NativeDestroyThread(thread);
            /*
             * thread gone, cannot de-reference thread now
             */
            return;
        }

        /* Now wait for someone to activate us again... */
        _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
    }
}
459
/*
** Body of a local (user-level) thread.  Runs the start function, does
** termination bookkeeping, optionally parks the thread on the dead
** queue for recycling, and finally context-switches away to the next
** runnable thread.  Never returns to its caller.
*/
static void _PR_UserRunThread(void)
{
    PRThread *thread = _PR_MD_CURRENT_THREAD();
    PRIntn is;

    /* Allow the thread we switched from to be scheduled again. */
    if (_MD_LAST_THREAD()) {
        _MD_LAST_THREAD()->no_sched = 0;
    }

#ifdef HAVE_CUSTOM_USER_THREADS
    /* Fibers and similar arrive with no stack record; synthesize one.
     * NOTE(review): a PR_NEWZAP failure leaves stack NULL; the
     * initializer below tolerates NULL but later users may not. */
    if (thread->stack == NULL) {
        thread->stack = PR_NEWZAP(PRThreadStack);
        _PR_InitializeNativeStack(thread->stack);
    }
#endif /* HAVE_CUSTOM_USER_THREADS */

    while(1) {
        /* Run thread main */
        if ( !_PR_IS_NATIVE_THREAD(thread)) {
            _PR_MD_SET_INTSOFF(0);
        }

        /*
         * Add to list of active threads
         */
        if (!(thread->flags & _PR_IDLE_THREAD)) {
            PR_Lock(_pr_activeLock);
            PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
            _pr_local_threads++;
            PR_Unlock(_pr_activeLock);
        }

        (*thread->startFunc)(thread->arg);

        /*
         * The following two assertions are meant for NT asynch io.
         *
         * The thread should have no asynch io in progress when it
         * exits, otherwise the overlapped buffer, which is part of
         * the thread structure, would become invalid.
         */
        PR_ASSERT(thread->io_pending == PR_FALSE);
        /*
         * This assertion enforces the programming guideline that
         * if an io function times out or is interrupted, the thread
         * should close the fd to force the asynch io to abort
         * before it exits. Right now, closing the fd is the only
         * way to clear the io_suspended flag.
         */
        PR_ASSERT(thread->io_suspended == PR_FALSE);

        PR_Lock(_pr_activeLock);
        /*
         * remove thread from list of active threads
         */
        if (!(thread->flags & _PR_IDLE_THREAD)) {
            PR_REMOVE_LINK(&thread->active);
            _pr_local_threads--;
        }
        PR_Unlock(_pr_activeLock);
        PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));

        /* All done, time to go away */
        _PR_CleanupThread(thread);

        _PR_INTSOFF(is);

        /* Wake any thread blocked in PR_JoinThread() on us. */
        _PR_NotifyJoinWaiters(thread);

        _PR_DecrActiveThreadCount(thread);

        thread->state = _PR_DEAD_STATE;

        if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
                                    PR_FAILURE)) {
            /*
            ** Destroy the thread resources
            */
            _PR_UserDestroyThread(thread);
        }

        /*
        ** Find another user thread to run. This cpu has finished the
        ** previous threads main and is now ready to run another thread.
        */
        {
            /* NOTE(review): this inner 'is' shadows the outer one; the
             * value stored by the _PR_INTSOFF above is never read. */
            PRInt32 is;
            _PR_INTSOFF(is);
            _PR_MD_SWITCH_CONTEXT(thread);
        }

        /* Will land here when we get scheduled again if we are recycling... */
    }
}
554
/*
** Change |thread|'s scheduling priority to |newPri|.
**
** Native (global) threads delegate straight to the OS via
** _PR_MD_SET_PRIORITY.  For local threads the priority field and, if
** necessary, the run-queue position are updated under the thread lock,
** and the resched flag is raised when a now-higher-priority runnable
** thread should preempt the caller.
*/
void _PR_SetThreadPriority(PRThread *thread, PRThreadPriority newPri)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRIntn is;

    if ( _PR_IS_NATIVE_THREAD(thread) ) {
        _PR_MD_SET_PRIORITY(&(thread->md), newPri);
        return;
    }

    /* Local path: block software interrupts while queues are touched. */
    if (!_PR_IS_NATIVE_THREAD(me)) {
        _PR_INTSOFF(is);
    }
    _PR_THREAD_LOCK(thread);
    if (newPri != thread->priority) {
        _PRCPU *cpu = thread->cpu;

        switch (thread->state) {
            case _PR_RUNNING:
                /* Change my priority */

                _PR_RUNQ_LOCK(cpu);
                thread->priority = newPri;
                /* Any ready thread above newPri should now preempt us. */
                if (_PR_RUNQREADYMASK(cpu) >> (newPri + 1)) {
                    if (!_PR_IS_NATIVE_THREAD(me)) {
                        _PR_SET_RESCHED_FLAG();
                    }
                }
                _PR_RUNQ_UNLOCK(cpu);
                break;

            case _PR_RUNNABLE:

                _PR_RUNQ_LOCK(cpu);
                /* Move to different runQ */
                _PR_DEL_RUNQ(thread);
                thread->priority = newPri;
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                _PR_ADD_RUNQ(thread, cpu, newPri);
                _PR_RUNQ_UNLOCK(cpu);

                if (newPri > me->priority) {
                    if (!_PR_IS_NATIVE_THREAD(me)) {
                        _PR_SET_RESCHED_FLAG();
                    }
                }

                break;

            case _PR_LOCK_WAIT:
            case _PR_COND_WAIT:
            case _PR_IO_WAIT:
            case _PR_SUSPENDED:
                /* Blocked threads just record the new priority; their
                 * queue position is fixed up when they wake. */
                thread->priority = newPri;
                break;
            /* NOTE(review): other states (_PR_JOIN_WAIT, _PR_DEAD_STATE)
             * are silently ignored -- confirm this is intended. */
        }
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me)) {
        _PR_INTSON(is);
    }
}
618
/*
** Suspend the named thread (must not be the caller).  Runnable local
** threads are moved to the per-CPU suspend queue; blocked threads are
** only marked _PR_SUSPENDING and park themselves when the wait ends.
** (The old mention of copying gc registers into regBuf predates this
** signature and no longer applies.)
*/
static void _PR_Suspend(PRThread *thread)
{
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(thread != me);
    /* A native thread reaching here must not be on an NSPR CPU. */
    PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread) || (!thread->cpu));

    if (!_PR_IS_NATIVE_THREAD(me)) {
        _PR_INTSOFF(is);
    }
    _PR_THREAD_LOCK(thread);
    switch (thread->state) {
        case _PR_RUNNABLE:
            if (!_PR_IS_NATIVE_THREAD(thread)) {
                /* Move from the run queue to the suspend queue. */
                _PR_RUNQ_LOCK(thread->cpu);
                _PR_DEL_RUNQ(thread);
                _PR_RUNQ_UNLOCK(thread->cpu);

                _PR_MISCQ_LOCK(thread->cpu);
                _PR_ADD_SUSPENDQ(thread, thread->cpu);
                _PR_MISCQ_UNLOCK(thread->cpu);
            } else {
                /*
                 * Only LOCAL threads are suspended by _PR_Suspend
                 */
                PR_ASSERT(0);
            }
            thread->state = _PR_SUSPENDED;
            break;

        case _PR_RUNNING:
            /*
             * The thread being suspended should be a LOCAL thread with
             * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
             */
            PR_ASSERT(0);
            break;

        case _PR_LOCK_WAIT:
        case _PR_IO_WAIT:
        case _PR_COND_WAIT:
            /* Blocked: just flag the suspension; the wakeup path honors
             * _PR_SUSPENDING before making the thread runnable. */
            if (_PR_IS_NATIVE_THREAD(thread)) {
                _PR_MD_SUSPEND_THREAD(thread);
            }
            thread->flags |= _PR_SUSPENDING;
            break;

        default:
            PR_Abort();
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me)) {
        _PR_INTSON(is);
    }
}
678
/*
** Undo a _PR_Suspend().  A fully suspended thread is moved back from
** the suspend queue to its run queue; a thread that was merely marked
** _PR_SUSPENDING while blocked has the mark cleared.  Sets the resched
** flag when the resumed thread outranks the current one.
*/
static void _PR_Resume(PRThread *thread)
{
    PRThreadPriority pri;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (!_PR_IS_NATIVE_THREAD(me)) {
        _PR_INTSOFF(is);
    }
    _PR_THREAD_LOCK(thread);
    switch (thread->state) {
        case _PR_SUSPENDED:
            thread->state = _PR_RUNNABLE;
            thread->flags &= ~_PR_SUSPENDING;
            if (!_PR_IS_NATIVE_THREAD(thread)) {
                /* Move back from the suspend queue to the run queue. */
                _PR_MISCQ_LOCK(thread->cpu);
                _PR_DEL_SUSPENDQ(thread);
                _PR_MISCQ_UNLOCK(thread->cpu);

                pri = thread->priority;

                _PR_RUNQ_LOCK(thread->cpu);
                _PR_ADD_RUNQ(thread, thread->cpu, pri);
                _PR_RUNQ_UNLOCK(thread->cpu);

                if (pri > _PR_MD_CURRENT_THREAD()->priority) {
                    if (!_PR_IS_NATIVE_THREAD(me)) {
                        _PR_SET_RESCHED_FLAG();
                    }
                }
            } else {
                /* Only LOCAL threads can be in _PR_SUSPENDED here. */
                PR_ASSERT(0);
            }
            break;

        case _PR_IO_WAIT:
        case _PR_COND_WAIT:
            /* Was only marked for suspension; clearing the flag is enough. */
            thread->flags &= ~_PR_SUSPENDING;
            /* PR_ASSERT(thread->wait.monitor->stickyCount == 0); */
            break;

        case _PR_LOCK_WAIT:
        {
            PRLock *wLock = thread->wait.lock;

            thread->flags &= ~_PR_SUSPENDING;

            /* If the awaited lock is now free, let a waiter take it. */
            _PR_LOCK_LOCK(wLock);
            if (thread->wait.lock->owner == 0) {
                _PR_UnblockLockWaiter(thread->wait.lock);
            }
            _PR_LOCK_UNLOCK(wLock);
            break;
        }
        case _PR_RUNNABLE:
            break;
        case _PR_RUNNING:
            /*
             * The thread being suspended should be a LOCAL thread with
             * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
             */
            PR_ASSERT(0);
            break;

        default:
            /*
             * thread should have been in one of the above-listed blocked states
             * (_PR_JOIN_WAIT, _PR_IO_WAIT, _PR_UNBORN, _PR_DEAD_STATE)
             */
            PR_Abort();
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me)) {
        _PR_INTSON(is);
    }

}
756
#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
/*
** Steal a runnable local thread from another CPU's run queue (work
** stealing for the MxN scheduler).  Returns the thread with its run
** queue entry removed, or NULL if nothing stealable was found.
** Threads that are marked no_sched, bound to their CPU, or blocked in
** I/O on that CPU are skipped.  *wakeup_cpus is set to PR_TRUE when a
** no_sched thread had to be skipped, so the caller can wake the other
** CPUs and avoid losing a runnable thread.
*/
static PRThread *get_thread(_PRCPU *cpu, PRBool *wakeup_cpus)
{
    PRThread *thread;
    PRIntn pri;
    PRUint32 r;
    PRCList *qp;
    PRIntn priMin, priMax;

    _PR_RUNQ_LOCK(cpu);
    /* Narrow the priority scan range using the ready mask. */
    r = _PR_RUNQREADYMASK(cpu);
    if (r==0) {
        priMin = priMax = PR_PRIORITY_FIRST;
    } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
        priMin = priMax = PR_PRIORITY_NORMAL;
    } else {
        priMin = PR_PRIORITY_FIRST;
        priMax = PR_PRIORITY_LAST;
    }
    thread = NULL;
    for (pri = priMax; pri >= priMin ; pri-- ) {
        if (r & (1 << pri)) {
            for (qp = _PR_RUNQ(cpu)[pri].next;
                 qp != &_PR_RUNQ(cpu)[pri];
                 qp = qp->next) {
                thread = _PR_THREAD_PTR(qp);
                /*
                 * skip non-schedulable threads
                 */
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                if (thread->no_sched) {
                    thread = NULL;
                    /*
                     * Need to wakeup cpus to avoid missing a
                     * runnable thread
                     * Waking up all CPU's need happen only once.
                     */
                    *wakeup_cpus = PR_TRUE;
                    continue;
                } else if (thread->flags & _PR_BOUND_THREAD) {
                    /*
                     * Thread bound to cpu 0
                     */
                    thread = NULL;
                    continue;
                } else if (thread->io_pending == PR_TRUE) {
                    /*
                     * A thread that is blocked for I/O needs to run
                     * on the same cpu on which it was blocked. This is because
                     * the cpu's ioq is accessed without lock protection and scheduling
                     * the thread on a different cpu would preclude this optimization.
                     */
                    thread = NULL;
                    continue;
                } else {
                    /* Pull thread off of its run queue */
                    _PR_DEL_RUNQ(thread);
                    _PR_RUNQ_UNLOCK(cpu);
                    return(thread);
                }
            }
        }
        thread = NULL;
    }
    _PR_RUNQ_UNLOCK(cpu);
    return(thread);
}
#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */
827
/*
** Schedule this native thread by finding the highest priority nspr
** thread that is ready to run.
**
** Note- everyone really needs to call _PR_MD_SWITCH_CONTEXT (which calls
** PR_Schedule() rather than calling PR_Schedule.  Otherwise if there
** is initialization required for switching from SWITCH_CONTEXT,
** it will not get done!
**
** Selection order: (1) the PR_SuspendAll() caller when suspend-all mode
** is active; (2) the highest-priority runnable thread on this CPU's run
** queues; (3) a thread stolen from another CPU (MxN Unix builds only);
** (4) this CPU's idle thread.  Interrupts must already be disabled.
*/
void _PR_Schedule(void)
{
    PRThread *thread, *me = _PR_MD_CURRENT_THREAD();
    _PRCPU *cpu = _PR_MD_CURRENT_CPU();
    PRIntn pri;
    PRUint32 r;
    PRCList *qp;
    PRIntn priMin, priMax;
#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
    PRBool wakeup_cpus;
#endif

    /* Interrupts must be disabled */
    PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

    /* Since we are rescheduling, we no longer want to */
    _PR_CLEAR_RESCHED_FLAG();

    /*
    ** Find highest priority thread to run. Bigger priority numbers are
    ** higher priority threads
    */
    _PR_RUNQ_LOCK(cpu);
    /*
     * if we are in SuspendAll mode, can schedule only the thread
     * that called PR_SuspendAll
     *
     * The thread may be ready to run now, after completing an I/O
     * operation, for example
     */
    if ((thread = suspendAllThread) != 0) {
        if ((!(thread->no_sched)) && (thread->state == _PR_RUNNABLE)) {
            /* Pull thread off of its run queue */
            _PR_DEL_RUNQ(thread);
            _PR_RUNQ_UNLOCK(cpu);
            goto found_thread;
        } else {
            thread = NULL;
            _PR_RUNQ_UNLOCK(cpu);
            goto idle_thread;
        }
    }
    /* Narrow the priority scan range using the ready mask. */
    r = _PR_RUNQREADYMASK(cpu);
    if (r==0) {
        priMin = priMax = PR_PRIORITY_FIRST;
    } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
        priMin = priMax = PR_PRIORITY_NORMAL;
    } else {
        priMin = PR_PRIORITY_FIRST;
        priMax = PR_PRIORITY_LAST;
    }
    thread = NULL;
    for (pri = priMax; pri >= priMin ; pri-- ) {
        if (r & (1 << pri)) {
            for (qp = _PR_RUNQ(cpu)[pri].next;
                 qp != &_PR_RUNQ(cpu)[pri];
                 qp = qp->next) {
                thread = _PR_THREAD_PTR(qp);
                /*
                 * skip non-schedulable threads
                 */
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                if ((thread->no_sched) && (me != thread)) {
                    thread = NULL;
                    continue;
                } else {
                    /* Pull thread off of its run queue */
                    _PR_DEL_RUNQ(thread);
                    _PR_RUNQ_UNLOCK(cpu);
                    goto found_thread;
                }
            }
        }
        thread = NULL;
    }
    _PR_RUNQ_UNLOCK(cpu);

#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)

    /* Nothing local: try to steal work from the other CPUs. */
    wakeup_cpus = PR_FALSE;
    _PR_CPU_LIST_LOCK();
    for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
        if (cpu != _PR_CPU_PTR(qp)) {
            if ((thread = get_thread(_PR_CPU_PTR(qp), &wakeup_cpus))
                != NULL) {
                thread->cpu = cpu;
                _PR_CPU_LIST_UNLOCK();
                if (wakeup_cpus == PR_TRUE) {
                    _PR_MD_WAKEUP_CPUS();
                }
                goto found_thread;
            }
        }
    }
    _PR_CPU_LIST_UNLOCK();
    if (wakeup_cpus == PR_TRUE) {
        _PR_MD_WAKEUP_CPUS();
    }

#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */

idle_thread:
    /*
    ** There are no threads to run. Switch to the idle thread
    */
    PR_LOG(_pr_sched_lm, PR_LOG_MAX, ("pausing"));
    thread = _PR_MD_CURRENT_CPU()->idle_thread;

found_thread:
    PR_ASSERT((me == thread) || ((thread->state == _PR_RUNNABLE) &&
                                 (!(thread->no_sched))));

    /* Resume the thread */
    PR_LOG(_pr_sched_lm, PR_LOG_MAX,
           ("switching to %d[%p]", thread->id, thread));
    PR_ASSERT(thread->state != _PR_RUNNING);
    thread->state = _PR_RUNNING;

    /* If we are on the runq, it just means that we went to sleep on some
     * resource, and by the time we got here another real native thread had
     * already given us the resource and put us back on the runqueue
     */
    PR_ASSERT(thread->cpu == _PR_MD_CURRENT_CPU());
    if (thread != me) {
        _PR_MD_RESTORE_CONTEXT(thread);
    }
#if 0
    /* XXXMB; with setjmp/longjmp it is impossible to land here, but
     * it is not with fibers... Is this a bad thing?  I believe it is
     * still safe.
     */
    PR_NOT_REACHED("impossible return from schedule");
#endif
}
971
972 /*
973 ** Attaches a thread.
974 ** Does not set the _PR_MD_CURRENT_THREAD.
975 ** Does not specify the scope of the thread.
976 */
977 static PRThread *
_PR_AttachThread(PRThreadType type,PRThreadPriority priority,PRThreadStack * stack)978 _PR_AttachThread(PRThreadType type, PRThreadPriority priority,
979 PRThreadStack *stack)
980 {
981 PRThread *thread;
982 char *mem;
983
984 if (priority > PR_PRIORITY_LAST) {
985 priority = PR_PRIORITY_LAST;
986 } else if (priority < PR_PRIORITY_FIRST) {
987 priority = PR_PRIORITY_FIRST;
988 }
989
990 mem = (char*) PR_CALLOC(sizeof(PRThread));
991 if (mem) {
992 thread = (PRThread*) mem;
993 thread->priority = priority;
994 thread->stack = stack;
995 thread->state = _PR_RUNNING;
996 PR_INIT_CLIST(&thread->lockList);
997 if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
998 PR_DELETE(thread);
999 return 0;
1000 }
1001
1002 return thread;
1003 }
1004 return 0;
1005 }
1006
1007
1008
/*
** Create and start a native (global-scope) thread.
**
** Attaches a fresh PRThread, updates the active-thread accounting,
** allocates a stack record (the OS owns the actual stack), wires up
** the start function, creates a termination CV for joinable threads,
** and finally asks the MD layer to spawn the OS thread running
** _PR_NativeRunThread.  On any failure the accounting is rolled back
** at the |done| label and NULL is returned.
*/
PR_IMPLEMENT(PRThread*)
_PR_NativeCreateThread(PRThreadType type,
                       void (*start)(void *arg),
                       void *arg,
                       PRThreadPriority priority,
                       PRThreadScope scope,
                       PRThreadState state,
                       PRUint32 stackSize,
                       PRUint32 flags)
{
    PRThread *thread;

    thread = _PR_AttachThread(type, priority, NULL);

    if (thread) {
        PR_Lock(_pr_activeLock);
        thread->flags = (flags | _PR_GLOBAL_SCOPE);
        thread->id = ++_pr_utid;
        if (type == PR_SYSTEM_THREAD) {
            thread->flags |= _PR_SYSTEM;
            _pr_systemActive++;
        } else {
            _pr_userActive++;
        }
        PR_Unlock(_pr_activeLock);

        thread->stack = PR_NEWZAP(PRThreadStack);
        if (!thread->stack) {
            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            goto done;
        }
        thread->stack->stackSize = stackSize?stackSize:_MD_DEFAULT_STACK_SIZE;
        thread->stack->thr = thread;
        thread->startFunc = start;
        thread->arg = arg;

        /*
           Set thread flags related to scope and joinable state. If joinable
           thread, allocate a "termination" condition variable.
         */
        if (state == PR_JOINABLE_THREAD) {
            thread->term = PR_NewCondVar(_pr_terminationCVLock);
            if (thread->term == NULL) {
                PR_DELETE(thread->stack);
                goto done;
            }
        }

        thread->state = _PR_RUNNING;
        if (_PR_MD_CREATE_THREAD(thread, _PR_NativeRunThread, priority,
                                 scope,state,stackSize) == PR_SUCCESS) {
            return thread;
        }
        /* OS thread creation failed: unwind the CV and stack record. */
        if (thread->term) {
            PR_DestroyCondVar(thread->term);
            thread->term = NULL;
        }
        PR_DELETE(thread->stack);
    }

done:
    /* Roll back the active count bumped above and free the thread. */
    if (thread) {
        _PR_DecrActiveThreadCount(thread);
        _PR_DestroyThread(thread);
    }
    return NULL;
}
1076
1077 /************************************************************************/
1078
/*
** Internal thread-creation engine.  PR_CreateThread() calls this with
** flags == 0; NSPR-internal callers may pass flag bits such as
** _PR_IDLE_THREAD.
**
**  "type"      PR_SYSTEM_THREAD or PR_USER_THREAD; selects which of the
**              active-thread counters (_pr_systemActive/_pr_userActive)
**              is incremented
**  "start"     the thread's root function, invoked with "arg"
**  "priority"  clamped into [PR_PRIORITY_FIRST, PR_PRIORITY_LAST]
**  "scope"     local vs. global(-bound); may be forced to global below
**  "state"     PR_JOINABLE_THREAD gets a termination condition variable
**  "stackSize" requested stack size, adjusted by _PR_ADJUST_STACKSIZE
**  "flags"     internal _PR_* flag bits
**
** Dead threads of the matching kind are recycled from the dead queues
** when possible to avoid re-allocating stacks and thread objects.
** Returns the new PRThread*, or NULL on failure with an NSPR error set.
*/
PR_IMPLEMENT(PRThread*) _PR_CreateThread(PRThreadType type,
                                         void (*start)(void *arg),
                                         void *arg,
                                         PRThreadPriority priority,
                                         PRThreadScope scope,
                                         PRThreadState state,
                                         PRUint32 stackSize,
                                         PRUint32 flags)
{
    PRThread *me;
    PRThread *thread = NULL;
    PRThreadStack *stack;
    char *top;
    PRIntn is;            /* saved interrupt state for _PR_INTSOFF/_PR_INTSON */
    PRIntn native = 0;    /* nonzero => create a native (global-scope) thread */
    PRIntn useRecycled = 0; /* set but not read below; see WIN95 pragma note
                             * at the top of this file about such locals */
    PRBool status;

    /*
    First, pin down the priority. Not all compilers catch passing out of
    range enum here. If we let bad values thru, priority queues won't work.
    */
    if (priority > PR_PRIORITY_LAST) {
        priority = PR_PRIORITY_LAST;
    } else if (priority < PR_PRIORITY_FIRST) {
        priority = PR_PRIORITY_FIRST;
    }

    if (!_pr_initialized) {
        _PR_ImplicitInitialization();
    }

    /*
    ** NOTE(review): "me" is assigned only when _PR_IDLE_THREAD is not
    ** set; every later read of "me" is guarded by the same flag test
    ** (or sits in the idle-thread branch), so the uninitialized value
    ** is never used -- but the invariant is fragile.
    */
    if (! (flags & _PR_IDLE_THREAD)) {
        me = _PR_MD_CURRENT_THREAD();
    }

#if defined(_PR_GLOBAL_THREADS_ONLY)
    /*
     * can create global threads only
     */
    if (scope == PR_LOCAL_THREAD) {
        scope = PR_GLOBAL_THREAD;
    }
#endif

    if (_native_threads_only) {
        scope = PR_GLOBAL_THREAD;
    }

    native = (((scope == PR_GLOBAL_THREAD)|| (scope == PR_GLOBAL_BOUND_THREAD))
              && _PR_IS_NATIVE_THREAD_SUPPORTED());

    _PR_ADJUST_STACKSIZE(stackSize);

    if (native) {
        /*
         * clear the IDLE_THREAD flag which applies to LOCAL
         * threads only
         */
        flags &= ~_PR_IDLE_THREAD;
        flags |= _PR_GLOBAL_SCOPE;
        if (_PR_NUM_DEADNATIVE > 0) {
            _PR_DEADQ_LOCK;

            if (_PR_NUM_DEADNATIVE == 0) { /* Thread safe check */
                /* Lost the race: queue drained before we took the lock. */
                _PR_DEADQ_UNLOCK;
            } else {
                /* Recycle a dead native thread instead of creating one. */
                thread = _PR_THREAD_PTR(_PR_DEADNATIVEQ.next);
                PR_REMOVE_LINK(&thread->links);
                _PR_DEC_DEADNATIVE;
                _PR_DEADQ_UNLOCK;

                _PR_InitializeRecycledThread(thread);
                thread->startFunc = start;
                thread->arg = arg;
                thread->flags = (flags | _PR_GLOBAL_SCOPE);
                if (type == PR_SYSTEM_THREAD)
                {
                    thread->flags |= _PR_SYSTEM;
                    PR_ATOMIC_INCREMENT(&_pr_systemActive);
                }
                else {
                    PR_ATOMIC_INCREMENT(&_pr_userActive);
                }

                /* Joinable threads need a termination CV; an unjoinable
                 * recycled thread must drop any leftover CV. */
                if (state == PR_JOINABLE_THREAD) {
                    if (!thread->term) {
                        thread->term = PR_NewCondVar(_pr_terminationCVLock);
                    }
                }
                else {
                    if(thread->term) {
                        PR_DestroyCondVar(thread->term);
                        thread->term = 0;
                    }
                }

                thread->priority = priority;
                _PR_MD_SET_PRIORITY(&(thread->md), priority);
                /* XXX what about stackSize? */
                thread->state = _PR_RUNNING;
                /* Wake the parked recycled thread so it runs startFunc. */
                _PR_MD_WAKEUP_WAITER(thread);
                return thread;
            }
        }
        thread = _PR_NativeCreateThread(type, start, arg, priority,
                                        scope, state, stackSize, flags);
    } else {
        if (_PR_NUM_DEADUSER > 0) {
            _PR_DEADQ_LOCK;

            if (_PR_NUM_DEADUSER == 0) { /* thread safe check */
                _PR_DEADQ_UNLOCK;
            } else {
                PRCList *ptr;

                /* Go down list checking for a recycled thread with a
                 * large enough stack. XXXMB - this has a bad degenerate case.
                 */
                ptr = _PR_DEADUSERQ.next;
                while( ptr != &_PR_DEADUSERQ ) {
                    thread = _PR_THREAD_PTR(ptr);
                    if ((thread->stack->stackSize >= stackSize) &&
                        (!thread->no_sched)) {
                        PR_REMOVE_LINK(&thread->links);
                        _PR_DEC_DEADUSER;
                        break;
                    } else {
                        ptr = ptr->next;
                        thread = NULL;   /* no candidate yet */
                    }
                }

                _PR_DEADQ_UNLOCK;

                if (thread) {
                    _PR_InitializeRecycledThread(thread);
                    thread->startFunc = start;
                    thread->arg = arg;
                    thread->priority = priority;
                    if (state == PR_JOINABLE_THREAD) {
                        if (!thread->term) {
                            thread->term = PR_NewCondVar(_pr_terminationCVLock);
                        }
                    } else {
                        if(thread->term) {
                            PR_DestroyCondVar(thread->term);
                            thread->term = 0;
                        }
                    }
                    useRecycled++;
                }
            }
        }
        if (thread == NULL) {
            /* No recyclable thread found: build one from scratch. */
#ifndef HAVE_CUSTOM_USER_THREADS
            stack = _PR_NewStack(stackSize);
            if (!stack) {
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                return NULL;
            }

            /* Allocate thread object and per-thread data off the top of the stack*/
            top = stack->stackTop;
#ifdef HAVE_STACK_GROWING_UP
            thread = (PRThread*) top;
            top = top + sizeof(PRThread);
            /*
             * Make stack 64-byte aligned
             */
            if ((PRUptrdiff)top & 0x3f) {
                top = (char*)(((PRUptrdiff)top + 0x40) & ~0x3f);
            }
#else
            top = top - sizeof(PRThread);
            thread = (PRThread*) top;
            /*
             * Make stack 64-byte aligned
             */
            if ((PRUptrdiff)top & 0x3f) {
                top = (char*)((PRUptrdiff)top & ~0x3f);
            }
#endif
            stack->thr = thread;
            memset(thread, 0, sizeof(PRThread));
            /* Remember the PRThread lives inside its own stack so the
             * error paths below free the stack, not the object. */
            thread->threadAllocatedOnStack = 1;
#else
            thread = _PR_MD_CREATE_USER_THREAD(stackSize, start, arg);
            if (!thread) {
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                return NULL;
            }
            thread->threadAllocatedOnStack = 0;
            stack = NULL;
            top = NULL;
#endif

            /* Initialize thread */
            thread->tpdLength = 0;
            thread->privateData = NULL;
            thread->stack = stack;
            thread->priority = priority;
            thread->startFunc = start;
            thread->arg = arg;
            PR_INIT_CLIST(&thread->lockList);

            if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
                if (thread->threadAllocatedOnStack == 1) {
                    _PR_FreeStack(thread->stack);
                }
                else {
                    PR_DELETE(thread);
                }
                PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
                return NULL;
            }

            if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
                if (thread->threadAllocatedOnStack == 1) {
                    _PR_FreeStack(thread->stack);
                }
                else {
                    PR_DELETE(thread->privateData);
                    PR_DELETE(thread);
                }
                PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
                return NULL;
            }

            /* Set up the initial machine context so the thread starts in
             * _PR_UserRunThread when first scheduled. */
            _PR_MD_INIT_CONTEXT(thread, top, _PR_UserRunThread, &status);

            if (status == PR_FALSE) {
                _PR_MD_FREE_LOCK(&thread->threadLock);
                if (thread->threadAllocatedOnStack == 1) {
                    _PR_FreeStack(thread->stack);
                }
                else {
                    PR_DELETE(thread->privateData);
                    PR_DELETE(thread);
                }
                return NULL;
            }

            /*
            Set thread flags related to scope and joinable state. If joinable
            thread, allocate a "termination" condition variable.
            */
            if (state == PR_JOINABLE_THREAD) {
                thread->term = PR_NewCondVar(_pr_terminationCVLock);
                if (thread->term == NULL) {
                    _PR_MD_FREE_LOCK(&thread->threadLock);
                    if (thread->threadAllocatedOnStack == 1) {
                        _PR_FreeStack(thread->stack);
                    }
                    else {
                        PR_DELETE(thread->privateData);
                        PR_DELETE(thread);
                    }
                    return NULL;
                }
            }

        }

        /* Update thread type counter */
        PR_Lock(_pr_activeLock);
        thread->flags = flags;
        thread->id = ++_pr_utid;
        if (type == PR_SYSTEM_THREAD) {
            thread->flags |= _PR_SYSTEM;
            _pr_systemActive++;
        } else {
            _pr_userActive++;
        }

        /* Make thread runnable */
        thread->state = _PR_RUNNABLE;
        /*
         * Add to list of active threads
         */
        PR_Unlock(_pr_activeLock);

        /* Pick a CPU for the new local thread: a native creator hands it
         * to the primordial CPU, otherwise it stays on the current CPU. */
        if ((! (thread->flags & _PR_IDLE_THREAD)) && _PR_IS_NATIVE_THREAD(me) ) {
            thread->cpu = _PR_GetPrimordialCPU();
        }
        else {
            thread->cpu = _PR_MD_CURRENT_CPU();
        }

        PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

        /* Enqueue on the run queue with interrupts off; _PR_INTSON is
         * deferred until after the wakeup calls below. */
        if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me)) {
            _PR_INTSOFF(is);
            _PR_RUNQ_LOCK(thread->cpu);
            _PR_ADD_RUNQ(thread, thread->cpu, priority);
            _PR_RUNQ_UNLOCK(thread->cpu);
        }

        if (thread->flags & _PR_IDLE_THREAD) {
            /*
            ** If the creating thread is a kernel thread, we need to
            ** awaken the user thread idle thread somehow; potentially
            ** it could be sleeping in its idle loop, and we need to poke
            ** it. To do so, wake the idle thread...
            */
            _PR_MD_WAKEUP_WAITER(NULL);
        } else if (_PR_IS_NATIVE_THREAD(me)) {
            _PR_MD_WAKEUP_WAITER(thread);
        }
        if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me) ) {
            _PR_INTSON(is);
        }
    }

    return thread;
}
1395
PR_CreateThread(PRThreadType type,void (* start)(void * arg),void * arg,PRThreadPriority priority,PRThreadScope scope,PRThreadState state,PRUint32 stackSize)1396 PR_IMPLEMENT(PRThread*) PR_CreateThread(PRThreadType type,
1397 void (*start)(void *arg),
1398 void *arg,
1399 PRThreadPriority priority,
1400 PRThreadScope scope,
1401 PRThreadState state,
1402 PRUint32 stackSize)
1403 {
1404 return _PR_CreateThread(type, start, arg, priority, scope, state,
1405 stackSize, 0);
1406 }
1407
/*
** Associate a thread object with an existing native thread.
** "type" is the type of thread object to attach
** "priority" is the priority to assign to the thread
** "stack" defines the shape of the threads stack
**
** This can return NULL if some kind of error occurs, or if memory is
** tight.
**
** This call is not normally needed unless you create your own native
** thread. PR_Init does this automatically for the primordial thread.
*/
PRThread* _PRI_AttachThread(PRThreadType type,
                            PRThreadPriority priority, PRThreadStack *stack, PRUint32 flags)
{
    PRThread *thread;

    /* Already attached?  Hand back the existing thread object. */
    if ((thread = _PR_MD_GET_ATTACHED_THREAD()) != NULL) {
        return thread;
    }
    _PR_MD_SET_CURRENT_THREAD(NULL);

    /* Clear out any state if this thread was attached before */
    _PR_MD_SET_CURRENT_CPU(NULL);

    thread = _PR_AttachThread(type, priority, stack);
    if (thread) {
        PRIntn is;   /* saved interrupt state for _PR_INTSOFF/_PR_INTSON */

        _PR_MD_SET_CURRENT_THREAD(thread);

        thread->flags = flags | _PR_GLOBAL_SCOPE | _PR_ATTACHED;

        /* No caller-supplied stack: fabricate a zeroed descriptor with
         * the platform default size (no actual stack memory needed --
         * the native thread already has one). */
        if (!stack) {
            thread->stack = PR_NEWZAP(PRThreadStack);
            if (!thread->stack) {
                _PR_DestroyThread(thread);
                return NULL;
            }
            thread->stack->stackSize = _MD_DEFAULT_STACK_SIZE;
        }
        PR_INIT_CLIST(&thread->links);

        /*
        ** NOTE(review): this failure path deletes thread->stack even
        ** when the caller supplied "stack" above -- presumably callers
        ** always pass NULL here; confirm before relying on it.
        */
        if (_PR_MD_INIT_ATTACHED_THREAD(thread) == PR_FAILURE) {
            PR_DELETE(thread->stack);
            _PR_DestroyThread(thread);
            return NULL;
        }

        _PR_MD_SET_CURRENT_CPU(NULL);

        /* Bump the active-thread counters; take _pr_activeLock (with
         * interrupts off) only when a CPU structure exists yet. */
        if (_PR_MD_CURRENT_CPU()) {
            _PR_INTSOFF(is);
            PR_Lock(_pr_activeLock);
        }
        if (type == PR_SYSTEM_THREAD) {
            thread->flags |= _PR_SYSTEM;
            _pr_systemActive++;
        } else {
            _pr_userActive++;
        }
        if (_PR_MD_CURRENT_CPU()) {
            PR_Unlock(_pr_activeLock);
            _PR_INTSON(is);
        }
    }
    return thread;
}
1476
PR_AttachThread(PRThreadType type,PRThreadPriority priority,PRThreadStack * stack)1477 PR_IMPLEMENT(PRThread*) PR_AttachThread(PRThreadType type,
1478 PRThreadPriority priority, PRThreadStack *stack)
1479 {
1480 return PR_GetCurrentThread();
1481 }
1482
PR_DetachThread(void)1483 PR_IMPLEMENT(void) PR_DetachThread(void)
1484 {
1485 /*
1486 * On Solaris, and Windows, foreign threads are detached when
1487 * they terminate.
1488 */
1489 #if !defined(WIN32) \
1490 && !(defined(SOLARIS) && defined(_PR_GLOBAL_THREADS_ONLY))
1491 PRThread *me;
1492 if (_pr_initialized) {
1493 me = _PR_MD_GET_ATTACHED_THREAD();
1494 if ((me != NULL) && (me->flags & _PR_ATTACHED)) {
1495 _PRI_DetachThread();
1496 }
1497 }
1498 #endif
1499 }
1500
/*
** Detach the calling (previously attached, native) thread from NSPR:
** run its cleanup, release its per-thread data and lock, decrement the
** active counters, and free the thread object.  The primordial thread
** is deliberately ignored.
*/
void _PRI_DetachThread(void)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (me->flags & _PR_PRIMORDIAL) {
        /*
         * ignore, if primordial thread
         */
        return;
    }
    PR_ASSERT(me->flags & _PR_ATTACHED);
    PR_ASSERT(_PR_IS_NATIVE_THREAD(me));
    _PR_CleanupThread(me);
    PR_DELETE(me->privateData);

    /* Drop this thread from the system/user active counters. */
    _PR_DecrActiveThreadCount(me);

    _PR_MD_CLEAN_THREAD(me);
    /* Clear the TLS slot before freeing so nothing can observe a
     * dangling current-thread pointer. */
    _PR_MD_SET_CURRENT_THREAD(NULL);
    if (!me->threadAllocatedOnStack) {
        PR_DELETE(me->stack);
    }
    _PR_MD_FREE_LOCK(&me->threadLock);
    PR_DELETE(me);
}
1526
/*
** Wait for thread termination:
** "thread" is the target thread
**
** This can return PR_FAILURE if no joinable thread could be found
** corresponding to the specified target thread.
**
** The calling thread is suspended until the target thread completes.
** Several threads cannot wait for the same thread to complete; one thread
** will complete successfully and others will terminate with an error PR_FAILURE.
** The calling thread will not be blocked if the target thread has already
** terminated.
*/
PR_IMPLEMENT(PRStatus) PR_JoinThread(PRThread *thread)
{
    PRIntn is;       /* saved interrupt state for _PR_INTSOFF/_PR_INTSON */
    PRCondVar *term;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    /* For local (user-level) callers, disable interrupts so we are not
     * preempted while inspecting the target's termination CV.
     * NOTE(review): thread->term is read here without holding
     * _pr_terminationCVLock -- presumably the interrupt discipline and
     * caller usage make this safe; confirm. */
    if (!_PR_IS_NATIVE_THREAD(me)) {
        _PR_INTSOFF(is);
    }
    term = thread->term;
    /* can't join a non-joinable thread */
    if (term == NULL) {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        goto ErrorExit;
    }

    /* multiple threads can't wait on the same joinable thread: a
     * non-empty condQ means another joiner is already waiting.
     * NOTE(review): no PR_SetError is issued on this path. */
    if (term->condQ.next != &term->condQ) {
        goto ErrorExit;
    }
    if (!_PR_IS_NATIVE_THREAD(me)) {
        _PR_INTSON(is);
    }

    /* wait for the target thread's termination cv invariant */
    PR_Lock (_pr_terminationCVLock);
    while (thread->state != _PR_JOIN_WAIT) {
        (void) PR_WaitCondVar(term, PR_INTERVAL_NO_TIMEOUT);
    }
    (void) PR_Unlock (_pr_terminationCVLock);

    /*
    Remove target thread from global waiting to join Q; make it runnable
    again and put it back on its run Q. When it gets scheduled later in
    _PR_RunThread code, it will clean up its stack.
    */
    if (!_PR_IS_NATIVE_THREAD(me)) {
        _PR_INTSOFF(is);
    }
    thread->state = _PR_RUNNABLE;
    if ( !_PR_IS_NATIVE_THREAD(thread) ) {
        _PR_THREAD_LOCK(thread);

        _PR_MISCQ_LOCK(thread->cpu);
        _PR_DEL_JOINQ(thread);
        _PR_MISCQ_UNLOCK(thread->cpu);

        _PR_AddThreadToRunQ(me, thread);
        _PR_THREAD_UNLOCK(thread);
    }
    if (!_PR_IS_NATIVE_THREAD(me)) {
        _PR_INTSON(is);
    }

    /* Let the (native or idle) side know the zombie may now run its
     * final cleanup. */
    _PR_MD_WAKEUP_WAITER(thread);

    return PR_SUCCESS;

ErrorExit:
    /* Re-enable interrupts if this local thread disabled them above. */
    if ( !_PR_IS_NATIVE_THREAD(me)) {
        _PR_INTSON(is);
    }
    return PR_FAILURE;
}
1604
PR_SetThreadPriority(PRThread * thread,PRThreadPriority newPri)1605 PR_IMPLEMENT(void) PR_SetThreadPriority(PRThread *thread,
1606 PRThreadPriority newPri)
1607 {
1608
1609 /*
1610 First, pin down the priority. Not all compilers catch passing out of
1611 range enum here. If we let bad values thru, priority queues won't work.
1612 */
1613 if ((PRIntn)newPri > (PRIntn)PR_PRIORITY_LAST) {
1614 newPri = PR_PRIORITY_LAST;
1615 } else if ((PRIntn)newPri < (PRIntn)PR_PRIORITY_FIRST) {
1616 newPri = PR_PRIORITY_FIRST;
1617 }
1618
1619 if ( _PR_IS_NATIVE_THREAD(thread) ) {
1620 thread->priority = newPri;
1621 _PR_MD_SET_PRIORITY(&(thread->md), newPri);
1622 } else {
1623 _PR_SetThreadPriority(thread, newPri);
1624 }
1625 }
1626
PR_SetCurrentThreadName(const char * name)1627 PR_IMPLEMENT(PRStatus) PR_SetCurrentThreadName(const char *name)
1628 {
1629 PRThread *thread;
1630 size_t nameLen;
1631
1632 if (!name) {
1633 PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
1634 return PR_FAILURE;
1635 }
1636
1637 thread = PR_GetCurrentThread();
1638 if (!thread) {
1639 return PR_FAILURE;
1640 }
1641
1642 PR_Free(thread->name);
1643 nameLen = strlen(name);
1644 thread->name = (char *)PR_Malloc(nameLen + 1);
1645 if (!thread->name) {
1646 return PR_FAILURE;
1647 }
1648 memcpy(thread->name, name, nameLen + 1);
1649 _PR_MD_SET_CURRENT_THREAD_NAME(thread->name);
1650 return PR_SUCCESS;
1651 }
1652
PR_GetThreadName(const PRThread * thread)1653 PR_IMPLEMENT(const char *) PR_GetThreadName(const PRThread *thread)
1654 {
1655 if (!thread) {
1656 return NULL;
1657 }
1658 return thread->name;
1659 }
1660
1661
/*
** This routine prevents all other threads from running. This call is needed by
** the garbage collector.
**
** Acquires _pr_activeLock and deliberately RETURNS WITH IT HELD; the
** matching PR_ResumeAll() releases it.  Only threads marked GC-able are
** stopped, and the calling thread skips itself.
*/
PR_IMPLEMENT(void) PR_SuspendAll(void)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRCList *qp;

    /*
     * Stop all user and native threads which are marked GC able.
     */
    PR_Lock(_pr_activeLock);
    suspendAllOn = PR_TRUE;
    suspendAllThread = _PR_MD_CURRENT_THREAD();
    _PR_MD_BEGIN_SUSPEND_ALL();
    /* Local (user-level) threads go through the scheduler's suspend. */
    for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
         qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) {
            _PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp));
            PR_ASSERT((_PR_ACTIVE_THREAD_PTR(qp))->state != _PR_RUNNING);
        }
    }
    /* Global (native) threads are stopped directly by the MD layer. */
    for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
         qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
            /* PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp)); */
        {
            _PR_MD_SUSPEND_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
        }
    }
    _PR_MD_END_SUSPEND_ALL();
}
1697
/*
** This routine unblocks all other threads that were suspended from running by
** PR_SuspendAll(). This call is needed by the garbage collector.
**
** Releases _pr_activeLock, which PR_SuspendAll() acquired and left held;
** the two calls must therefore be strictly paired on the same thread.
*/
PR_IMPLEMENT(void) PR_ResumeAll(void)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRCList *qp;

    /*
     * Resume all user and native threads which are marked GC able.
     */
    _PR_MD_BEGIN_RESUME_ALL();
    /* Local (user-level) threads resume via the scheduler. */
    for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
         qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) {
            _PR_Resume(_PR_ACTIVE_THREAD_PTR(qp));
        }
    }
    /* Global (native) threads resume directly through the MD layer. */
    for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
         qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) {
            _PR_MD_RESUME_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
        }
    }
    _PR_MD_END_RESUME_ALL();
    suspendAllThread = NULL;
    suspendAllOn = PR_FALSE;
    PR_Unlock(_pr_activeLock);
}
1730
PR_EnumerateThreads(PREnumerator func,void * arg)1731 PR_IMPLEMENT(PRStatus) PR_EnumerateThreads(PREnumerator func, void *arg)
1732 {
1733 PRCList *qp, *qp_next;
1734 PRIntn i = 0;
1735 PRStatus rv = PR_SUCCESS;
1736 PRThread* t;
1737
1738 /*
1739 ** Currently Enumerate threads happen only with suspension and
1740 ** pr_activeLock held
1741 */
1742 PR_ASSERT(suspendAllOn);
1743
1744 /* Steve Morse, 4-23-97: Note that we can't walk a queue by taking
1745 * qp->next after applying the function "func". In particular, "func"
1746 * might remove the thread from the queue and put it into another one in
1747 * which case qp->next no longer points to the next entry in the original
1748 * queue.
1749 *
1750 * To get around this problem, we save qp->next in qp_next before applying
1751 * "func" and use that saved value as the next value after applying "func".
1752 */
1753
1754 /*
1755 * Traverse the list of local and global threads
1756 */
1757 for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
1758 qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp_next)
1759 {
1760 qp_next = qp->next;
1761 t = _PR_ACTIVE_THREAD_PTR(qp);
1762 if (_PR_IS_GCABLE_THREAD(t))
1763 {
1764 rv = (*func)(t, i, arg);
1765 if (rv != PR_SUCCESS) {
1766 return rv;
1767 }
1768 i++;
1769 }
1770 }
1771 for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
1772 qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp_next)
1773 {
1774 qp_next = qp->next;
1775 t = _PR_ACTIVE_THREAD_PTR(qp);
1776 if (_PR_IS_GCABLE_THREAD(t))
1777 {
1778 rv = (*func)(t, i, arg);
1779 if (rv != PR_SUCCESS) {
1780 return rv;
1781 }
1782 i++;
1783 }
1784 }
1785 return rv;
1786 }
1787
/* FUNCTION: _PR_AddSleepQ
** DESCRIPTION:
**    Adds a thread to the sleep/pauseQ.  The sleep queue is a
**    differential ("delta") list: each entry's |sleep| field holds its
**    timeout relative to the entry in front of it, and _PR_SLEEPQMAX(cpu)
**    is the sum over the whole queue (the largest absolute timeout).
**    Threads with no timeout go on the pause queue instead.
** RESTRICTIONS:
**    Caller must have the RUNQ lock.
**    Caller must be a user level thread
*/
PR_IMPLEMENT(void)
_PR_AddSleepQ(PRThread *thread, PRIntervalTime timeout)
{
    _PRCPU *cpu = thread->cpu;

    if (timeout == PR_INTERVAL_NO_TIMEOUT) {
        /* append the thread to the global pause Q */
        PR_APPEND_LINK(&thread->links, &_PR_PAUSEQ(thread->cpu));
        thread->flags |= _PR_ON_PAUSEQ;
    } else {
        PRIntervalTime sleep;
        PRCList *q;
        PRThread *t;

        /* sort onto global sleepQ */
        sleep = timeout;

        /* Check if we are longest timeout */
        if (timeout >= _PR_SLEEPQMAX(cpu)) {
            /* Longest (or equal-longest) timeout: go to the tail and
             * store only the excess over the previous queue maximum. */
            PR_INSERT_BEFORE(&thread->links, &_PR_SLEEPQ(cpu));
            thread->sleep = timeout - _PR_SLEEPQMAX(cpu);
            _PR_SLEEPQMAX(cpu) = timeout;
        } else {
            /* Sort thread into global sleepQ at appropriate point */
            q = _PR_SLEEPQ(cpu).next;

            /* Now scan the list for where to insert this entry */
            while (q != &_PR_SLEEPQ(cpu)) {
                t = _PR_THREAD_PTR(q);
                if (sleep < t->sleep) {
                    /* Found sleeper to insert in front of */
                    break;
                }
                /* Keep |sleep| relative to q's predecessor as we walk. */
                sleep -= t->sleep;
                q = q->next;
            }
            thread->sleep = sleep;
            PR_INSERT_BEFORE(&thread->links, q);

            /*
            ** Subtract our sleep time from the sleeper that follows us (there
            ** must be one) so that they remain relative to us.
            */
            PR_ASSERT (thread->links.next != &_PR_SLEEPQ(cpu));

            t = _PR_THREAD_PTR(thread->links.next);
            PR_ASSERT(_PR_THREAD_PTR(t->links.prev) == thread);
            t->sleep -= sleep;
        }

        thread->flags |= _PR_ON_SLEEPQ;
    }
}
1848
/* FUNCTION: _PR_DelSleepQ
** DESCRIPTION:
**    Removes a thread from the sleep/pauseQ, fixing up the delta-encoded
**    timeouts of the remaining sleepers.
** INPUTS:
**    If propogate_time is true, then the thread following the deleted
**    thread inherits the deleted thread's remaining time. This is used
**    when deleting a sleeper that has not timed out, so the successors'
**    absolute deadlines are unchanged.
** RESTRICTIONS:
**    Caller must have the RUNQ lock.
**    Caller must be a user level thread
*/
PR_IMPLEMENT(void)
_PR_DelSleepQ(PRThread *thread, PRBool propogate_time)
{
    _PRCPU *cpu = thread->cpu;

    /* Remove from pauseQ/sleepQ */
    if (thread->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
        if (thread->flags & _PR_ON_SLEEPQ) {
            PRCList *q = thread->links.next;
            if (q != &_PR_SLEEPQ(cpu)) {
                if (propogate_time == PR_TRUE) {
                    /* Successor absorbs our delta; queue total is kept. */
                    PRThread *after = _PR_THREAD_PTR(q);
                    after->sleep += thread->sleep;
                } else {
                    /* Deadline expired: queue total shrinks by our delta. */
                    _PR_SLEEPQMAX(cpu) -= thread->sleep;
                }
            } else {
                /* We are the last sleeper.  Check if prev is the beginning
                 * of the list; if so, we are the only element on the list.
                 */
                if (thread->links.prev != &_PR_SLEEPQ(cpu)) {
                    _PR_SLEEPQMAX(cpu) -= thread->sleep;
                }
                else {
                    _PR_SLEEPQMAX(cpu) = 0;
                }
            }
            thread->flags &= ~_PR_ON_SLEEPQ;
        } else {
            thread->flags &= ~_PR_ON_PAUSEQ;
        }
        PR_REMOVE_LINK(&thread->links);
    } else {
        PR_ASSERT(0);  /* thread was on neither queue -- caller bug */
    }
}
1896
/*
** Place a local (user-level) thread on a run queue and, where the
** platform allows, hint the scheduler to reschedule if the new thread
** outranks the current one.
*/
void
_PR_AddThreadToRunQ(
    PRThread *me, /* the current thread */
    PRThread *thread) /* the local thread to be added to a run queue */
{
    PRThreadPriority pri = thread->priority;
    _PRCPU *cpu = thread->cpu;

    PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

#if defined(WINNT)
    /*
     * On NT, we can only reliably know that the current CPU
     * is not idle. We add the awakened thread to the run
     * queue of its CPU if its CPU is the current CPU.
     * For any other CPU, we don't really know whether it
     * is busy or idle. So in all other cases, we just
     * "post" the awakened thread to the IO completion port
     * for the next idle CPU to execute (this is done in
     * _PR_MD_WAKEUP_WAITER).
     * Threads with a suspended I/O operation remain bound to
     * the same cpu until I/O is cancelled
     *
     * NOTE: the boolean expression below must be the exact
     * opposite of the corresponding boolean expression in
     * _PR_MD_WAKEUP_WAITER.
     */
    if ((!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) ||
        (thread->md.thr_bound_cpu)) {
        PR_ASSERT(!thread->md.thr_bound_cpu ||
                  (thread->md.thr_bound_cpu == cpu));
        _PR_RUNQ_LOCK(cpu);
        _PR_ADD_RUNQ(thread, cpu, pri);
        _PR_RUNQ_UNLOCK(cpu);
    }
#else
    _PR_RUNQ_LOCK(cpu);
    _PR_ADD_RUNQ(thread, cpu, pri);
    _PR_RUNQ_UNLOCK(cpu);
    /* Preemption hint: if the awakened thread outranks the current one
     * on this same CPU, request a reschedule. */
    if (!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) {
        if (pri > me->priority) {
            _PR_SET_RESCHED_FLAG();
        }
    }
#endif
}
1943