/*
 * PROJECT:     ReactOS Kernel
 * LICENSE:     GPL - See COPYING in the top level directory
 * FILE:        ntoskrnl/include/internal/ke_x.h
 * PURPOSE:     Internal Inlined Functions for the Kernel
 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
 */

#ifndef _M_ARM
FORCEINLINE
KPROCESSOR_MODE
KeGetPreviousMode(VOID)
{
    /* Return the current mode */
    return KeGetCurrentThread()->PreviousMode;
}
#endif

//
// Enters a Guarded Region
//
#define KeEnterGuardedRegionThread(_Thread) \
{ \
    /* Sanity checks */ \
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT((_Thread->SpecialApcDisable <= 0) && \
           (_Thread->SpecialApcDisable != -32768)); \
\
    /* Disable Special APCs */ \
    _Thread->SpecialApcDisable--; \
}

#define KeEnterGuardedRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
    KeEnterGuardedRegionThread(_Thread); \
}

//
// Leaves a Guarded Region
//
#define KeLeaveGuardedRegionThread(_Thread) \
{ \
    /* Sanity checks */ \
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT(_Thread->SpecialApcDisable < 0); \
\
    /* Leave region and check if APCs are OK now */ \
    if (!(++_Thread->SpecialApcDisable)) \
    { \
        /* Check for Kernel APCs on the list */ \
        if (!IsListEmpty(&_Thread->ApcState. \
                         ApcListHead[KernelMode])) \
        { \
            /* Check for APC Delivery */ \
            KiCheckForKernelApcDelivery(); \
        } \
    } \
}

#define KeLeaveGuardedRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
    KeLeaveGuardedRegionThread(_Thread); \
}

//
// Enters a Critical Region
//
#define KeEnterCriticalRegionThread(_Thread) \
{ \
    /* Sanity checks */ \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT((_Thread->KernelApcDisable <= 0) && \
           (_Thread->KernelApcDisable != -32768)); \
\
    /* Disable Kernel APCs */ \
    _Thread->KernelApcDisable--; \
}

#define KeEnterCriticalRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
    KeEnterCriticalRegionThread(_Thread); \
}

//
// Leaves a Critical Region
//
#define KeLeaveCriticalRegionThread(_Thread) \
{ \
    /* Sanity checks */ \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT(_Thread->KernelApcDisable < 0); \
\
    /* Enable Kernel APCs */ \
    _Thread->KernelApcDisable++; \
\
    /* Check if Kernel APCs are now enabled */ \
    if (!(_Thread->KernelApcDisable)) \
    { \
        /* Check if we need to request an APC Delivery */ \
        if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) && \
            !(_Thread->SpecialApcDisable)) \
        { \
            /* Check for the right environment */ \
            KiCheckForKernelApcDelivery(); \
        } \
    } \
}

#define KeLeaveCriticalRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
    KeLeaveCriticalRegionThread(_Thread); \
}
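
//
// A typical pairing, as a minimal illustrative sketch (not used by the
// build): a kernel-mode caller at IRQL <= APC_LEVEL brackets code that must
// not be interrupted by normal kernel APCs with a critical region, or by
// special kernel APCs as well with a guarded region.
//
//     KeEnterCriticalRegion();
//     /* ... code that must not see normal kernel APC delivery ... */
//     KeLeaveCriticalRegion();
//
//     KeEnterGuardedRegion();
//     /* ... code that must not see any kernel APC delivery ... */
//     KeLeaveGuardedRegion();
//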

#ifndef CONFIG_SMP

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    UNREFERENCED_PARAMETER(Object);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    UNREFERENCED_PARAMETER(Object);
}

FORCEINLINE
KIRQL
KiAcquireDispatcherLock(VOID)
{
    /* Raise to synch level */
    return KfRaiseIrql(SYNCH_LEVEL);
}

FORCEINLINE
VOID
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* Just exit the dispatcher */
    KiExitDispatcher(OldIrql);
}

FORCEINLINE
VOID
KiAcquireDispatcherLockAtSynchLevel(VOID)
{
    /* This is a no-op at SYNCH_LEVEL for UP systems */
    ASSERT(KeGetCurrentIrql() >= SYNCH_LEVEL);
    return;
}

FORCEINLINE
VOID
KiReleaseDispatcherLockFromSynchLevel(VOID)
{
    /* This is a no-op at SYNCH_LEVEL for UP systems */
    return;
}

//
// This routine makes the thread deferred ready on the boot CPU.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    /* Set the thread to deferred state and boot CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = 0;

    /* Make the thread ready immediately */
    KiDeferredReadyThread(Thread);
}

FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* This is meaningless on UP systems */
    UNREFERENCED_PARAMETER(NewThread);
    UNREFERENCED_PARAMETER(Cpu);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    UNREFERENCED_PARAMETER(Prcb);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    UNREFERENCED_PARAMETER(Prcb);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}
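
//
// The dispatcher lock helpers above are used in matched pairs: the acquire
// returns the previous IRQL and the release consumes it when it exits the
// dispatcher. A minimal illustrative sketch (not part of the build):
//
//     KIRQL OldIrql = KiAcquireDispatcherLock();
//     /* ... touch dispatcher database state ... */
//     KiReleaseDispatcherLock(OldIrql);
//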

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
BOOLEAN
KiTryThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
    return FALSE;
}

FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* There are no deferred ready lists on UP systems */
    UNREFERENCED_PARAMETER(Prcb);
}

FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
                      IN UCHAR Processor)
{
    /* We deliver instantly on UP */
    UNREFERENCED_PARAMETER(NeedApc);
    UNREFERENCED_PARAMETER(Processor);
}

FORCEINLINE
PKSPIN_LOCK_QUEUE
KiAcquireTimerLock(IN ULONG Hand)
{
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Nothing to do on UP */
    UNREFERENCED_PARAMETER(Hand);
    return NULL;
}

FORCEINLINE
VOID
KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
{
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Nothing to do on UP */
    UNREFERENCED_PARAMETER(LockQueue);
}

#else

FORCEINLINE
VOID
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    LONG OldValue;

    /* Make sure we're at a safe level to touch the lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    do
    {
        /* Loop until the other CPU releases it */
        while (TRUE)
        {
            /* Check if it got released */
            OldValue = Object->Lock;
            if ((OldValue & KOBJECT_LOCK_BIT) == 0) break;

            /* Let the CPU know that this is a loop */
            YieldProcessor();
        }

        /* Try acquiring the lock now */
    } while (InterlockedCompareExchange(&Object->Lock,
                                        OldValue | KOBJECT_LOCK_BIT,
                                        OldValue) != OldValue);
}

FORCEINLINE
VOID
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    /* Make sure we're at a safe level to touch the lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Release it */
    InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
}

FORCEINLINE
KIRQL
KiAcquireDispatcherLock(VOID)
{
    /* Raise to synchronization level and acquire the dispatcher lock */
    return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
}

FORCEINLINE
VOID
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* First release the lock */
    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
                                        LockQueue[LockQueueDispatcherLock]);

    /* Then exit the dispatcher */
    KiExitDispatcher(OldIrql);
}

FORCEINLINE
VOID
KiAcquireDispatcherLockAtSynchLevel(VOID)
{
    /* Acquire the dispatcher lock */
    ASSERT(KeGetCurrentIrql() >= SYNCH_LEVEL);
    KeAcquireQueuedSpinLockAtDpcLevel(&KeGetCurrentPrcb()->
                                      LockQueue[LockQueueDispatcherLock]);
}

FORCEINLINE
VOID
KiReleaseDispatcherLockFromSynchLevel(VOID)
{
    /* Release the dispatcher lock */
    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
                                        LockQueue[LockQueueDispatcherLock]);
}

//
// This routine inserts a thread into the deferred ready list of the current CPU
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Set the thread to deferred state and CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = Prcb->Number;

    /* Add it on the list */
    PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
}

FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* Check if a new thread needs to be scheduled on a different CPU */
    if ((NewThread) && !(KeGetCurrentPrcb()->Number == Cpu))
    {
        /* Send an IPI to request delivery */
        KiIpiSend(AFFINITY_MASK(Cpu), IPI_DPC);
    }
}

//
// This routine sets the current thread in a swap busy state, which ensures
// that nobody else tries to swap it concurrently.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* Make sure nobody already set it */
    ASSERT(Thread->SwapBusy == FALSE);

    /* Set it ourselves */
    Thread->SwapBusy = TRUE;
}

//
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we're at a safe level to touch the PRCB lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange((PLONG)&Prcb->PrcbLock, 1)) break;

        /* Loop until the other CPU releases it */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Prcb->PrcbLock);
    }
}

//
// This routine releases the PRCB lock so that other callers can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we are above dispatch and the lock is acquired! */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);
    ASSERT(Prcb->PrcbLock != 0);

    /* Release it */
    InterlockedAnd((PLONG)&Prcb->PrcbLock, 0);
}
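
//
// Both PRCB lock routines above implement a bare test-and-set spin lock, so
// the usual pattern is a short critical section held at DISPATCH_LEVEL or
// above. A minimal illustrative sketch (not part of the build):
//
//     KiAcquirePrcbLock(Prcb);
//     /* ... read or update volatile PRCB fields ... */
//     KiReleasePrcbLock(Prcb);
//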

//
// This routine acquires the thread lock so that only one caller can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we're at a safe level to touch the thread lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange((PLONG)&Thread->ThreadLock, 1)) break;

        /* Loop until the other CPU releases it */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Thread->ThreadLock);
    }
}

//
// This routine releases the thread lock so that other callers can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we are still above dispatch */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Release it */
    InterlockedAnd((PLONG)&Thread->ThreadLock, 0);
}

FORCEINLINE
BOOLEAN
KiTryThreadLock(IN PKTHREAD Thread)
{
    LONG Value;

    /* If the lock isn't acquired, return false */
    if (!Thread->ThreadLock) return FALSE;

    /* Otherwise, try to acquire it and check the result */
    Value = 1;
    Value = InterlockedExchange((PLONG)&Thread->ThreadLock, Value);

    /* Return the lock state */
    return (Value == 1);
}

FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* Scan the deferred ready lists if required */
    if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
}

FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
                      IN UCHAR Processor)
{
    /* Check if we need to request APC delivery */
    if (NeedApc)
    {
        /* Check if it's on another CPU */
        if (KeGetCurrentPrcb()->Number != Processor)
        {
            /* Send an IPI to request delivery */
            KiIpiSend(AFFINITY_MASK(Processor), IPI_APC);
        }
        else
        {
            /* Request a software interrupt */
            HalRequestSoftwareInterrupt(APC_LEVEL);
        }
    }
}

FORCEINLINE
PKSPIN_LOCK_QUEUE
KiAcquireTimerLock(IN ULONG Hand)
{
    PKSPIN_LOCK_QUEUE LockQueue;
    ULONG LockIndex;
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Get the lock index */
    LockIndex = Hand >> LOCK_QUEUE_TIMER_LOCK_SHIFT;
    LockIndex &= (LOCK_QUEUE_TIMER_TABLE_LOCKS - 1);

    /* Now get the lock */
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueueTimerTableLock + LockIndex];

    /* Acquire it and return */
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);
    return LockQueue;
}

FORCEINLINE
VOID
KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
{
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Release the lock */
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
}

#endif

FORCEINLINE
VOID
KiAcquireApcLockRaiseToSynch(IN PKTHREAD Thread,
                             IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiAcquireApcLockAtSynchLevel(IN PKTHREAD Thread,
                             IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock */
    ASSERT(KeGetCurrentIrql() >= SYNCH_LEVEL);
    KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiAcquireApcLockRaiseToDpc(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock */
    KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLock(Handle);
}

FORCEINLINE
VOID
KiReleaseApcLockFromSynchLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
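
//
// The APC queue lock helpers above use in-stack queued spin locks, so the
// caller provides the KLOCK_QUEUE_HANDLE storage. A minimal illustrative
// sketch, assuming a caller running at or below APC_LEVEL (not part of the
// build):
//
//     KLOCK_QUEUE_HANDLE ApcLock;
//     KiAcquireApcLockRaiseToSynch(Thread, &ApcLock);
//     /* ... examine or modify the thread's APC state ... */
//     KiReleaseApcLock(&ApcLock);
//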

FORCEINLINE
VOID
KiAcquireProcessLockRaiseToSynch(IN PKPROCESS Process,
                                 IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
}

FORCEINLINE
VOID
KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock and restore previous IRQL */
    KeReleaseInStackQueuedSpinLock(Handle);
}

FORCEINLINE
VOID
KiReleaseProcessLockFromSynchLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock without lowering IRQL */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}

FORCEINLINE
VOID
KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
                         IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Lock the Queue, we're not at DPC level */
        KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
    }
    else
    {
        /* We must be at DPC level, acquire the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
                                                 DeviceLock);
    }
}

FORCEINLINE
VOID
KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Unlock the Queue, we're not at DPC level */
        KeReleaseInStackQueuedSpinLock(DeviceLock);
    }
    else
    {
        /* We must be at DPC level, release the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
    }
}

//
// Satisfies the wait of a mutant dispatcher object
//
#define KiSatisfyMutantWait(Object, Thread) \
{ \
    /* Decrease the Signal State */ \
    (Object)->Header.SignalState--; \
\
    /* Check if it's now non-signaled */ \
    if (!(Object)->Header.SignalState) \
    { \
        /* Set the Owner Thread */ \
        (Object)->OwnerThread = Thread; \
\
        /* Disable APCs if needed */ \
        Thread->KernelApcDisable = Thread->KernelApcDisable - \
                                   (Object)->ApcDisable; \
\
        /* Check if it's abandoned */ \
        if ((Object)->Abandoned) \
        { \
            /* Unabandon it */ \
            (Object)->Abandoned = FALSE; \
\
            /* Return Status */ \
            Thread->WaitStatus = STATUS_ABANDONED; \
        } \
\
        /* Insert it into the Mutant List */ \
        InsertHeadList(Thread->MutantListHead.Blink, \
                       &(Object)->MutantListEntry); \
    } \
}

//
// Satisfies the wait of any nonmutant dispatcher object
//
#define KiSatisfyNonMutantWait(Object) \
{ \
    if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
        EventSynchronizationObject) \
    { \
        /* Synchronization Timers and Events just get un-signaled */ \
        (Object)->Header.SignalState = 0; \
    } \
    else if ((Object)->Header.Type == SemaphoreObject) \
    { \
        /* These ones can have multiple states, so we only decrease it */ \
        (Object)->Header.SignalState--; \
    } \
}

//
// Satisfies the wait of any dispatcher object
//
#define KiSatisfyObjectWait(Object, Thread) \
{ \
    /* Special case for Mutants */ \
    if ((Object)->Header.Type == MutantObject) \
    { \
        KiSatisfyMutantWait((Object), (Thread)); \
    } \
    else \
    { \
        KiSatisfyNonMutantWait(Object); \
    } \
}
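
//
// As an illustrative reading of the macros above: satisfying a wait on a
// semaphore whose SignalState is 3 only drops it to 2 (it remains signaled),
// a synchronization event or timer is reset to 0 so it releases a single
// waiter, and a mutant that drops to 0 additionally records the new owner
// thread and is queued on that thread's mutant list.
//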

//
// Recalculates the due time
//
FORCEINLINE
PLARGE_INTEGER
KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
                     IN PLARGE_INTEGER DueTime,
                     IN OUT PLARGE_INTEGER NewDueTime)
{
    /* Don't do anything for absolute waits */
    if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;

    /* Otherwise, query the interrupt time and recalculate */
    NewDueTime->QuadPart = KeQueryInterruptTime();
    NewDueTime->QuadPart -= DueTime->QuadPart;
    return NewDueTime;
}

//
// Determines whether a thread should be added to the wait list
//
FORCEINLINE
BOOLEAN
KiCheckThreadStackSwap(IN PKTHREAD Thread,
                       IN KPROCESSOR_MODE WaitMode)
{
    /* Check the required conditions */
    if ((WaitMode != KernelMode) &&
        (Thread->EnableStackSwap) &&
        (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
    {
        /* We are go for swap */
        return TRUE;
    }
    else
    {
        /* Don't swap the thread */
        return FALSE;
    }
}

//
// Adds a thread to the wait list
//
#define KiAddThreadToWaitList(Thread, Swappable) \
{ \
    /* Make sure it's swappable */ \
    if (Swappable) \
    { \
        /* Insert it into the PRCB's List */ \
        InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
                       &Thread->WaitListEntry); \
    } \
}

//
// Checks if a wait in progress should be interrupted by APCs or an alertable
// state.
//
FORCEINLINE
NTSTATUS
KiCheckAlertability(IN PKTHREAD Thread,
                    IN BOOLEAN Alertable,
                    IN KPROCESSOR_MODE WaitMode)
{
    /* Check if the wait is alertable */
    if (Alertable)
    {
        /* It is, first check if the thread is alerted in this mode */
        if (Thread->Alerted[WaitMode])
        {
            /* It is, so bail out of the wait */
            Thread->Alerted[WaitMode] = FALSE;
            return STATUS_ALERTED;
        }
        else if ((WaitMode != KernelMode) &&
                 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
        {
            /* It isn't, but this is a user wait with queued user APCs */
            Thread->ApcState.UserApcPending = TRUE;
            return STATUS_USER_APC;
        }
        else if (Thread->Alerted[KernelMode])
        {
            /* It isn't that either, but we're alerted in kernel mode */
            Thread->Alerted[KernelMode] = FALSE;
            return STATUS_ALERTED;
        }
    }
    else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
    {
        /* Not alertable, but this is a user wait with pending user APCs */
        return STATUS_USER_APC;
    }

    /* Otherwise, we're fine */
    return STATUS_WAIT_0;
}

FORCEINLINE
ULONG
KiComputeTimerTableIndex(IN ULONGLONG DueTime)
{
    return (DueTime / KeMaximumIncrement) & (TIMER_TABLE_SIZE - 1);
}

//
// Called from KiCompleteTimer, KiInsertTreeTimer, KeSetSystemTime
// to remove timer entries
// See Windows HPI blog for more information.
FORCEINLINE
VOID
KiRemoveEntryTimer(IN PKTIMER Timer)
{
    ULONG Hand;
    PKTIMER_TABLE_ENTRY TableEntry;

    /* Remove the timer from the timer list and check if it's empty */
    Hand = Timer->Header.Hand;
    if (RemoveEntryList(&Timer->TimerListEntry))
    {
        /* Get the respective timer table entry */
        TableEntry = &KiTimerTableListHead[Hand];
        if (&TableEntry->Entry == TableEntry->Entry.Flink)
        {
            /* Set the entry to an infinite absolute time */
            TableEntry->Time.HighPart = 0xFFFFFFFF;
        }
    }

    /* Clear the list entries on dbg builds so we can tell the timer is gone */
#if DBG
    Timer->TimerListEntry.Flink = NULL;
    Timer->TimerListEntry.Blink = NULL;
#endif
}
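
//
// KiComputeTimerTableIndex above hashes a due time (in interrupt time, 100 ns
// units) into one of the timer table lists: it divides by the clock increment
// and wraps modulo TIMER_TABLE_SIZE. As a rough illustrative example, with a
// clock increment of about 15.6 ms (KeMaximumIncrement around 156250), due
// times that fall within the same clock tick normally hash to the same list.
//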

//
// Called by Wait and Queue code to insert a timer for dispatching.
// Also called by KeSetTimerEx to insert a timer from the caller.
//
FORCEINLINE
VOID
KxInsertTimer(IN PKTIMER Timer,
              IN ULONG Hand)
{
    PKSPIN_LOCK_QUEUE LockQueue;
    ASSERT(KeGetCurrentIrql() >= SYNCH_LEVEL);

    /* Acquire the lock and release the dispatcher lock */
    LockQueue = KiAcquireTimerLock(Hand);
    KiReleaseDispatcherLockFromSynchLevel();

    /* Try to insert the timer */
    if (KiInsertTimerTable(Timer, Hand))
    {
        /* Complete it */
        KiCompleteTimer(Timer, LockQueue);
    }
    else
    {
        /* Do nothing, just release the lock */
        KiReleaseTimerLock(LockQueue);
    }
}

//
// Called by KeSetTimerEx and KiInsertTreeTimer to calculate Due Time
// See the Windows HPI Blog for more information
//
FORCEINLINE
BOOLEAN
KiComputeDueTime(IN PKTIMER Timer,
                 IN LARGE_INTEGER DueTime,
                 OUT PULONG Hand)
{
    LARGE_INTEGER InterruptTime, SystemTime, DifferenceTime;

    /* Convert to relative time if needed */
    Timer->Header.Absolute = FALSE;
    if (DueTime.HighPart >= 0)
    {
        /* Get System Time */
        KeQuerySystemTime(&SystemTime);

        /* Do the conversion */
        DifferenceTime.QuadPart = SystemTime.QuadPart - DueTime.QuadPart;

        /* Make sure it hasn't already expired */
        Timer->Header.Absolute = TRUE;
        if (DifferenceTime.HighPart >= 0)
        {
            /* Cancel everything */
            Timer->Header.SignalState = TRUE;
            Timer->Header.Hand = 0;
            Timer->DueTime.QuadPart = 0;
            *Hand = 0;
            return FALSE;
        }

        /* Set the time as Absolute */
        DueTime = DifferenceTime;
    }

    /* Get the Interrupt Time */
    InterruptTime.QuadPart = KeQueryInterruptTime();

    /* Recalculate due time */
    Timer->DueTime.QuadPart = InterruptTime.QuadPart - DueTime.QuadPart;

    /* Get the handle */
    *Hand = KiComputeTimerTableIndex(Timer->DueTime.QuadPart);
    Timer->Header.Hand = (UCHAR)*Hand;
    Timer->Header.Inserted = TRUE;
    return TRUE;
}
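
//
// KiComputeDueTime above follows the usual NT timer convention: a positive
// (or zero) due time is an absolute system time, while a negative value is an
// interval relative to the current time, in 100 ns units. An illustrative
// one-second relative timeout, for example, looks like this at the caller:
//
//     LARGE_INTEGER DueTime;
//     DueTime.QuadPart = -10LL * 1000 * 1000;   /* 1 s = 10,000,000 * 100 ns */
//     KeSetTimerEx(Timer, DueTime, 0, NULL);
//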

//
// Called from Unlink and Queue Insert Code.
// Also called by timer code when canceling an inserted timer.
// Removes a timer from its tree.
//
FORCEINLINE
VOID
KxRemoveTreeTimer(IN PKTIMER Timer)
{
    ULONG Hand = Timer->Header.Hand;
    PKSPIN_LOCK_QUEUE LockQueue;
    PKTIMER_TABLE_ENTRY TimerEntry;

    /* Acquire timer lock */
    LockQueue = KiAcquireTimerLock(Hand);

    /* Set the timer as non-inserted */
    Timer->Header.Inserted = FALSE;

    /* Remove it from the timer list */
    if (RemoveEntryList(&Timer->TimerListEntry))
    {
        /* Get the entry and check if it's empty */
        TimerEntry = &KiTimerTableListHead[Hand];
        if (IsListEmpty(&TimerEntry->Entry))
        {
            /* Clear the time then */
            TimerEntry->Time.HighPart = 0xFFFFFFFF;
        }
    }

    /* Release the timer lock */
    KiReleaseTimerLock(LockQueue);
}

FORCEINLINE
VOID
KxSetTimerForThreadWait(IN PKTIMER Timer,
                        IN LARGE_INTEGER Interval,
                        OUT PULONG Hand)
{
    ULONGLONG DueTime;
    LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;

    /* Check the timer's interval to see if it's absolute */
    Timer->Header.Absolute = FALSE;
    if (Interval.HighPart >= 0)
    {
        /* Get the system time and calculate the relative time */
        KeQuerySystemTime(&SystemTime);
        TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
        Timer->Header.Absolute = TRUE;

        /* Check if we've already expired */
        if (TimeDifference.HighPart >= 0)
        {
            /* Reset everything */
            Timer->DueTime.QuadPart = 0;
            *Hand = 0;
            Timer->Header.Hand = 0;
            return;
        }
        else
        {
            /* Update the interval */
            Interval = TimeDifference;
        }
    }

    /* Calculate the due time */
    InterruptTime.QuadPart = KeQueryInterruptTime();
    DueTime = InterruptTime.QuadPart - Interval.QuadPart;
    Timer->DueTime.QuadPart = DueTime;

    /* Calculate the timer handle */
    *Hand = KiComputeTimerTableIndex(DueTime);
    Timer->Header.Hand = (UCHAR)*Hand;
}

#define KxDelayThreadWait() \
\
    /* Setup the Wait Block */ \
    Thread->WaitBlockList = TimerBlock; \
\
    /* Setup the timer */ \
    KxSetTimerForThreadWait(Timer, *Interval, &Hand); \
\
    /* Save the due time for the caller */ \
    DueTime.QuadPart = Timer->DueTime.QuadPart; \
\
    /* Link the timer to this Wait Block */ \
    TimerBlock->NextWaitBlock = TimerBlock; \
    Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
    Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
\
    /* Clear wait status */ \
    Thread->WaitStatus = STATUS_SUCCESS; \
\
    /* Setup wait fields */ \
    Thread->Alertable = Alertable; \
    Thread->WaitReason = DelayExecution; \
    Thread->WaitMode = WaitMode; \
\
    /* Check if we can swap the thread's stack */ \
    Thread->WaitListEntry.Flink = NULL; \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
\
    /* Set the wait time */ \
    Thread->WaitTime = KeTickCount.LowPart;

#define KxMultiThreadWait() \
    /* Link wait block array to the thread */ \
    Thread->WaitBlockList = WaitBlockArray; \
\
    /* Reset the index */ \
    Index = 0; \
\
    /* Loop wait blocks */ \
    do \
    { \
        /* Fill out the wait block */ \
        WaitBlock = &WaitBlockArray[Index]; \
        WaitBlock->Object = Object[Index]; \
        WaitBlock->WaitKey = (USHORT)Index; \
        WaitBlock->WaitType = WaitType; \
        WaitBlock->Thread = Thread; \
\
        /* Link to next block */ \
        WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1]; \
        Index++; \
    } while (Index < Count); \
\
    /* Link the last block */ \
    WaitBlock->NextWaitBlock = WaitBlockArray; \
\
    /* Set default wait status */ \
    Thread->WaitStatus = STATUS_WAIT_0; \
\
    /* Check if we have a timer */ \
    if (Timeout) \
    { \
        /* Link to the block */ \
        TimerBlock->NextWaitBlock = WaitBlockArray; \
\
        /* Setup the timer */ \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
\
        /* Save the due time for the caller */ \
        DueTime.QuadPart = Timer->DueTime.QuadPart; \
\
        /* Initialize the list */ \
        InitializeListHead(&Timer->Header.WaitListHead); \
    } \
\
    /* Set wait settings */ \
    Thread->Alertable = Alertable; \
    Thread->WaitMode = WaitMode; \
    Thread->WaitReason = WaitReason; \
\
    /* Check if we can swap the thread's stack */ \
    Thread->WaitListEntry.Flink = NULL; \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
\
    /* Set the wait time */ \
    Thread->WaitTime = KeTickCount.LowPart;

#define KxSingleThreadWait() \
    /* Setup the Wait Block */ \
    Thread->WaitBlockList = WaitBlock; \
    WaitBlock->WaitKey = STATUS_SUCCESS; \
    WaitBlock->Object = Object; \
    WaitBlock->WaitType = WaitAny; \
\
    /* Clear wait status */ \
    Thread->WaitStatus = STATUS_SUCCESS; \
\
    /* Check if we have a timer */ \
    if (Timeout) \
    { \
        /* Setup the timer */ \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
\
        /* Save the due time for the caller */ \
        DueTime.QuadPart = Timer->DueTime.QuadPart; \
\
        /* Pointer to timer block */ \
        WaitBlock->NextWaitBlock = TimerBlock; \
        TimerBlock->NextWaitBlock = WaitBlock; \
\
        /* Link the timer to this Wait Block */ \
        Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
        Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
    } \
    else \
    { \
        /* No timer block, just ourselves */ \
        WaitBlock->NextWaitBlock = WaitBlock; \
    } \
\
    /* Set wait settings */ \
    Thread->Alertable = Alertable; \
    Thread->WaitMode = WaitMode; \
    Thread->WaitReason = WaitReason; \
\
    /* Check if we can swap the thread's stack */ \
    Thread->WaitListEntry.Flink = NULL; \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
\
    /* Set the wait time */ \
    Thread->WaitTime = KeTickCount.LowPart;

#define KxQueueThreadWait() \
    /* Setup the Wait Block */ \
    Thread->WaitBlockList = WaitBlock; \
    WaitBlock->WaitKey = STATUS_SUCCESS; \
    WaitBlock->Object = Queue; \
    WaitBlock->WaitType = WaitAny; \
    WaitBlock->Thread = Thread; \
\
    /* Clear wait status */ \
    Thread->WaitStatus = STATUS_SUCCESS; \
\
    /* Check if we have a timer */ \
    if (Timeout) \
    { \
        /* Setup the timer */ \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
\
        /* Save the due time for the caller */ \
        DueTime.QuadPart = Timer->DueTime.QuadPart; \
\
        /* Pointer to timer block */ \
        WaitBlock->NextWaitBlock = TimerBlock; \
        TimerBlock->NextWaitBlock = WaitBlock; \
\
        /* Link the timer to this Wait Block */ \
        Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
        Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
    } \
    else \
    { \
        /* No timer block, just ourselves */ \
        WaitBlock->NextWaitBlock = WaitBlock; \
    } \
\
    /* Set wait settings */ \
    Thread->Alertable = FALSE; \
    Thread->WaitMode = WaitMode; \
    Thread->WaitReason = WrQueue; \
\
    /* Check if we can swap the thread's stack */ \
    Thread->WaitListEntry.Flink = NULL; \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
\
    /* Set the wait time */ \
    Thread->WaitTime = KeTickCount.LowPart;
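
//
// In the wait setup macros above, the wait blocks are chained through
// NextWaitBlock into a small circular list: a single-object wait either
// points back to itself or alternates with the timer block, and a
// multi-object wait links the array in order with the last entry pointing
// back to the first, so the wait code can start at any block and visit each
// one exactly once.
//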

//
// Unwaits a Thread
//
FORCEINLINE
VOID
KxUnwaitThread(IN DISPATCHER_HEADER *Object,
               IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK WaitBlock;
    PKTHREAD WaitThread;
    ULONG WaitKey;

    /* Loop the Wait Entries */
    WaitList = &Object->WaitListHead;
    ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);

        /* Get the waiting thread */
        WaitThread = WaitBlock->Thread;

        /* Check the current Wait Mode */
        if (WaitBlock->WaitType == WaitAny)
        {
            /* Use the actual wait key */
            WaitKey = WaitBlock->WaitKey;
        }
        else
        {
            /* Otherwise, use STATUS_KERNEL_APC */
            WaitKey = STATUS_KERNEL_APC;
        }

        /* Unwait the thread */
        KiUnwaitThread(WaitThread, WaitKey, Increment);

        /* Next entry */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}

//
// Unwaits a Thread waiting on an event
//
FORCEINLINE
VOID
KxUnwaitThreadForEvent(IN PKEVENT Event,
                       IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK WaitBlock;
    PKTHREAD WaitThread;

    /* Loop the Wait Entries */
    WaitList = &Event->Header.WaitListHead;
    ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);

        /* Get the waiting thread */
        WaitThread = WaitBlock->Thread;

        /* Check the current Wait Mode */
        if (WaitBlock->WaitType == WaitAny)
        {
            /* Un-signal it */
            Event->Header.SignalState = 0;

            /* Un-signal the event and unwait the thread */
            KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
            break;
        }

        /* Unwait the thread with STATUS_KERNEL_APC */
        KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);

        /* Next entry */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}
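
//
// In both helpers above, only WaitAny waiters complete with the wait block's
// own WaitKey; waiters using any other wait type are woken with
// STATUS_KERNEL_APC which, by convention in the wait code, is not returned to
// the caller but effectively makes the waiting thread re-run its wait and
// re-evaluate all of the objects it is waiting on.
//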

//
// This routine queues a thread that is ready on the PRCB's ready lists.
// If this thread cannot currently run on this CPU, then the thread is
// added to the deferred ready list instead.
//
// This routine must be entered with the PRCB lock held and it will exit
// with the PRCB lock released!
//
FORCEINLINE
VOID
KxQueueReadyThread(IN PKTHREAD Thread,
                   IN PKPRCB Prcb)
{
    BOOLEAN Preempted;
    KPRIORITY Priority;

    /* Sanity checks */
    ASSERT(Prcb == KeGetCurrentPrcb());
    ASSERT(Thread->State == Running);
    ASSERT(Thread->NextProcessor == Prcb->Number);

    /* Check if this thread is allowed to run in this CPU */
#ifdef CONFIG_SMP
    if ((Thread->Affinity) & (Prcb->SetMember))
#else
    if (TRUE)
#endif
    {
        /* Set thread ready for execution */
        Thread->State = Ready;

        /* Save current priority and if someone had pre-empted it */
        Priority = Thread->Priority;
        Preempted = Thread->Preempted;

        /* We're not pre-empting now, and set the wait time */
        Thread->Preempted = FALSE;
        Thread->WaitTime = KeTickCount.LowPart;

        /* Sanity check */
        ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));

        /* Insert this thread in the appropriate order */
        Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry) :
                    InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry);

        /* Update the ready summary */
        Prcb->ReadySummary |= PRIORITY_MASK(Priority);

        /* Sanity check */
        ASSERT(Priority == Thread->Priority);

        /* Release the PRCB lock */
        KiReleasePrcbLock(Prcb);
    }
    else
    {
        /* Otherwise, prepare this thread to be deferred */
        Thread->State = DeferredReady;
        Thread->DeferredProcessor = Prcb->Number;

        /* Release the lock and defer scheduling */
        KiReleasePrcbLock(Prcb);
        KiDeferredReadyThread(Thread);
    }
}

//
// This routine scans for an appropriate ready thread to select at the
// given priority and for the given CPU.
//
FORCEINLINE
PKTHREAD
KiSelectReadyThread(IN KPRIORITY Priority,
                    IN PKPRCB Prcb)
{
    ULONG PrioritySet;
    LONG HighPriority;
    PLIST_ENTRY ListEntry;
    PKTHREAD Thread = NULL;

    /* Save the current mask and get the priority set for the CPU */
    PrioritySet = Prcb->ReadySummary >> Priority;
    if (!PrioritySet) goto Quickie;

    /* Get the highest priority possible */
    BitScanReverse((PULONG)&HighPriority, PrioritySet);
    ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
    HighPriority += Priority;

    /* Make sure the list isn't empty at the highest priority */
    ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);

    /* Get the first thread on the list */
    ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
    Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);

    /* Make sure this thread is here for a reason */
    ASSERT(HighPriority == Thread->Priority);
    ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
    ASSERT(Thread->NextProcessor == Prcb->Number);

    /* Remove it from the list */
    if (RemoveEntryList(&Thread->WaitListEntry))
    {
        /* The list is empty now, reset the ready summary */
        Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
    }

    /* Sanity check and return the thread */
Quickie:
    ASSERT((Thread == NULL) ||
           (Thread->BasePriority == 0) ||
           (Thread->Priority != 0));
    return Thread;
}
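
//
// KiSelectReadyThread above treats ReadySummary as a bitmask with one bit per
// priority level that has at least one ready thread queued. As an
// illustrative example, if bits 8 and 12 are set and the caller asks for
// priority 0, the shifted set still contains both bits and BitScanReverse
// selects bit 12, so the priority-12 ready list is drained first.
//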

//
// This routine computes the new priority for a thread. It is only valid for
// threads with priorities in the dynamic priority range.
//
FORCEINLINE
SCHAR
KiComputeNewPriority(IN PKTHREAD Thread,
                     IN SCHAR Adjustment)
{
    SCHAR Priority;

    /* Priority sanity checks */
    ASSERT((Thread->PriorityDecrement >= 0) &&
           (Thread->PriorityDecrement <= Thread->Priority));
    ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
           TRUE : (Thread->PriorityDecrement == 0));

    /* Get the current priority */
    Priority = Thread->Priority;
    if (Priority < LOW_REALTIME_PRIORITY)
    {
        /* Decrease priority by the priority decrement */
        Priority -= (Thread->PriorityDecrement + Adjustment);

        /* Don't go out of bounds */
        if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;

        /* Reset the priority decrement */
        Thread->PriorityDecrement = 0;
    }

    /* Sanity check */
    ASSERT((Thread->BasePriority == 0) || (Priority != 0));

    /* Return the new priority */
    return Priority;
}

//
// Guarded Mutex Routines
//
FORCEINLINE
VOID
_KeInitializeGuardedMutex(OUT PKGUARDED_MUTEX GuardedMutex)
{
    /* Setup the Initial Data */
    GuardedMutex->Count = GM_LOCK_BIT;
    GuardedMutex->Owner = NULL;
    GuardedMutex->Contention = 0;

    /* Initialize the Wait Gate */
    KeInitializeGate(&GuardedMutex->Gate);
}

FORCEINLINE
VOID
_KeAcquireGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Sanity checks */
    ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
           (Thread->SpecialApcDisable < 0) ||
           (Thread->Teb == NULL) ||
           (Thread->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
    ASSERT(GuardedMutex->Owner != Thread);

    /* Remove the lock */
    if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
    {
        /* The Guarded Mutex was already locked, enter contended case */
        KiAcquireGuardedMutex(GuardedMutex);
    }

    /* Set the Owner */
    GuardedMutex->Owner = Thread;
}

FORCEINLINE
VOID
_KeReleaseGuardedMutexUnsafe(IN OUT PKGUARDED_MUTEX GuardedMutex)
{
    LONG OldValue, NewValue;

    /* Sanity checks */
    ASSERT((KeGetCurrentIrql() == APC_LEVEL) ||
           (KeGetCurrentThread()->SpecialApcDisable < 0) ||
           (KeGetCurrentThread()->Teb == NULL) ||
           (KeGetCurrentThread()->Teb >= (PTEB)MM_SYSTEM_RANGE_START));
    ASSERT(GuardedMutex->Owner == KeGetCurrentThread());

    /* Destroy the Owner */
    GuardedMutex->Owner = NULL;

    /* Add the Lock Bit */
    OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
    ASSERT((OldValue & GM_LOCK_BIT) == 0);

    /* Check if it was already locked, but not woken */
    if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
    {
        /* Update the OldValue to what it should be now */
        OldValue += GM_LOCK_BIT;

        /* The mutex will be woken, minus one waiter */
        NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
                   GM_LOCK_WAITER_INC;

        /* Remove the Woken bit */
        if (InterlockedCompareExchange(&GuardedMutex->Count,
                                       NewValue,
                                       OldValue) == OldValue)
        {
            /* Signal the Gate */
            KeSignalGateBoostPriority(&GuardedMutex->Gate);
        }
    }
}
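
//
// The "Unsafe" variants above assume the caller has already blocked special
// kernel APC delivery, for example by being inside a guarded region or by
// running at APC_LEVEL, which is what their ASSERTs check. A minimal
// illustrative sketch of such a caller (not part of the build):
//
//     KeEnterGuardedRegion();
//     _KeAcquireGuardedMutexUnsafe(&GuardedMutex);
//     /* ... access the data the mutex protects ... */
//     _KeReleaseGuardedMutexUnsafe(&GuardedMutex);
//     KeLeaveGuardedRegion();
//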

FORCEINLINE
VOID
_KeAcquireGuardedMutex(IN PKGUARDED_MUTEX GuardedMutex)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Sanity checks */
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT(GuardedMutex->Owner != Thread);

    /* Disable Special APCs */
    KeEnterGuardedRegionThread(Thread);

    /* Remove the lock */
    if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
    {
        /* The Guarded Mutex was already locked, enter contended case */
        KiAcquireGuardedMutex(GuardedMutex);
    }

    /* Set the Owner and Special APC Disable state */
    GuardedMutex->Owner = Thread;
    GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
}

FORCEINLINE
VOID
_KeReleaseGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
{
    PKTHREAD Thread = KeGetCurrentThread();
    LONG OldValue, NewValue;

    /* Sanity checks */
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT(GuardedMutex->Owner == Thread);
    ASSERT(Thread->SpecialApcDisable == GuardedMutex->SpecialApcDisable);

    /* Destroy the Owner */
    GuardedMutex->Owner = NULL;

    /* Add the Lock Bit */
    OldValue = InterlockedExchangeAdd(&GuardedMutex->Count, GM_LOCK_BIT);
    ASSERT((OldValue & GM_LOCK_BIT) == 0);

    /* Check if it was already locked, but not woken */
    if ((OldValue) && !(OldValue & GM_LOCK_WAITER_WOKEN))
    {
        /* Update the OldValue to what it should be now */
        OldValue += GM_LOCK_BIT;

        /* The mutex will be woken, minus one waiter */
        NewValue = OldValue + GM_LOCK_WAITER_WOKEN -
                   GM_LOCK_WAITER_INC;

        /* Remove the Woken bit */
        if (InterlockedCompareExchange(&GuardedMutex->Count,
                                       NewValue,
                                       OldValue) == OldValue)
        {
            /* Signal the Gate */
            KeSignalGateBoostPriority(&GuardedMutex->Gate);
        }
    }

    /* Re-enable APCs */
    KeLeaveGuardedRegionThread(Thread);
}

FORCEINLINE
BOOLEAN
_KeTryToAcquireGuardedMutex(IN OUT PKGUARDED_MUTEX GuardedMutex)
{
    PKTHREAD Thread = KeGetCurrentThread();

    /* Block APCs */
    KeEnterGuardedRegionThread(Thread);

    /* Remove the lock */
    if (!InterlockedBitTestAndReset(&GuardedMutex->Count, GM_LOCK_BIT_V))
    {
        /* Re-enable APCs */
        KeLeaveGuardedRegionThread(Thread);
        YieldProcessor();

        /* Return failure */
        return FALSE;
    }

    /* Set the Owner and APC State */
    GuardedMutex->Owner = Thread;
    GuardedMutex->SpecialApcDisable = Thread->SpecialApcDisable;
    return TRUE;
}

FORCEINLINE
VOID
KiAcquireNmiListLock(OUT PKIRQL OldIrql)
{
    KeAcquireSpinLock(&KiNmiCallbackListLock, OldIrql);
}

FORCEINLINE
VOID
KiReleaseNmiListLock(IN KIRQL OldIrql)
{
    KeReleaseSpinLock(&KiNmiCallbackListLock, OldIrql);
}

#if defined(_M_IX86) || defined(_M_AMD64)
FORCEINLINE
VOID
KiCpuId(
    PCPU_INFO CpuInfo,
    ULONG Function)
{
    __cpuid((INT*)CpuInfo->AsUINT32, Function);
}

FORCEINLINE
VOID
KiCpuIdEx(
    PCPU_INFO CpuInfo,
    ULONG Function,
    ULONG SubFunction)
{
    __cpuidex((INT*)CpuInfo->AsUINT32, Function, SubFunction);
}
#endif /* _M_IX86 || _M_AMD64 */
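
//
// A minimal illustrative sketch of how the CPUID wrappers above are used (not
// part of the build): __cpuid stores EAX, EBX, ECX and EDX into the four
// AsUINT32 slots in that order, so querying leaf 1 and testing EDX bit 25,
// for example, checks for SSE support.
//
//     CPU_INFO CpuInfo;
//     KiCpuId(&CpuInfo, 1);
//     if (CpuInfo.AsUINT32[3] & (1 << 25))
//     {
//         /* SSE is supported */
//     }
//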