/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         GPL - See COPYING in the top level directory
 * FILE:            ntoskrnl/include/ke_x.h
 * PURPOSE:         Internal Inlined Functions for the Kernel
 * PROGRAMMERS:     Alex Ionescu (alex.ionescu@reactos.org)
 */

//
// Thread Dispatcher Header DebugActive Mask
//
#define DR_MASK(x)                              (1 << (x))
#define DR_ACTIVE_MASK                          0x10
#define DR_REG_MASK                             0x4F

#ifdef _M_IX86
//
// Sanitizes a selector
//
FORCEINLINE
ULONG
Ke386SanitizeSeg(IN ULONG Cs,
                 IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and force CPL 0 if so.
    // Otherwise, force CPL 3.
    //
    return ((Mode == KernelMode) ?
            (Cs & (0xFFFF & ~RPL_MASK)) :
            (RPL_MASK | (Cs & 0xFFFF)));
}

//
// Sanitizes EFLAGS
//
FORCEINLINE
ULONG
Ke386SanitizeFlags(IN ULONG Eflags,
                   IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and sanitize EFLAGS if so.
    // Otherwise, also force interrupt mask on.
    //
    return ((Mode == KernelMode) ?
            (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
            (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
}

//
// Gets a DR register from a CONTEXT structure
//
FORCEINLINE
PVOID
KiDrFromContext(IN ULONG Dr,
                IN PCONTEXT Context)
{
    return *(PVOID*)((ULONG_PTR)Context + KiDebugRegisterContextOffsets[Dr]);
}

//
// Gets a DR register from a KTRAP_FRAME structure
//
FORCEINLINE
PVOID*
KiDrFromTrapFrame(IN ULONG Dr,
                  IN PKTRAP_FRAME TrapFrame)
{
    return (PVOID*)((ULONG_PTR)TrapFrame + KiDebugRegisterTrapOffsets[Dr]);
}

//
// Sanitizes a debug register address
//
FORCEINLINE
PVOID
Ke386SanitizeDr(IN PVOID DrAddress,
                IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and return the address directly if so.
    // Otherwise, make sure it's not inside the kernel-mode address space.
    // If it is, then clear the address.
    //
    return ((Mode == KernelMode) ? DrAddress :
            (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
}
#endif /* _M_IX86 */

//
// Enters a Guarded Region
//
#define KeEnterGuardedRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
 \
    /* Sanity checks */ \
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT((_Thread->SpecialApcDisable <= 0) && \
           (_Thread->SpecialApcDisable != -32768)); \
 \
    /* Disable Special APCs */ \
    _Thread->SpecialApcDisable--; \
}

//
// Leaves a Guarded Region
//
#define KeLeaveGuardedRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
 \
    /* Sanity checks */ \
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL); \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT(_Thread->SpecialApcDisable < 0); \
 \
    /* Leave region and check if APCs are OK now */ \
    if (!(++_Thread->SpecialApcDisable)) \
    { \
        /* Check for Kernel APCs on the list */ \
        if (!IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) \
        { \
            /* Check for APC Delivery */ \
            KiCheckForKernelApcDelivery(); \
        } \
    } \
}
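
//
// Usage sketch (illustrative only): guarded regions are used in matched
// enter/leave pairs to block all APCs, including special kernel APCs, around
// a short critical section. KiExampleGuardedWork is a hypothetical routine
// shown purely as an example; nothing in the kernel calls it.
//
FORCEINLINE
VOID
KiExampleGuardedWork(VOID)
{
    /* Block special kernel APCs (and, implicitly, normal kernel APCs) */
    KeEnterGuardedRegion();

    /* ... touch per-thread state that must not be interrupted by any APC ... */

    /* Re-enable special APCs; pending kernel APCs may be delivered here */
    KeLeaveGuardedRegion();
}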

//
// TODO: Guarded Mutex Routines
//

//
// Enters a Critical Region
//
#define KeEnterCriticalRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
 \
    /* Sanity checks */ \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT((_Thread->KernelApcDisable <= 0) && \
           (_Thread->KernelApcDisable != -32768)); \
 \
    /* Disable Kernel APCs */ \
    _Thread->KernelApcDisable--; \
}

//
// Leaves a Critical Region
//
#define KeLeaveCriticalRegion() \
{ \
    PKTHREAD _Thread = KeGetCurrentThread(); \
 \
    /* Sanity checks */ \
    ASSERT(_Thread == KeGetCurrentThread()); \
    ASSERT(_Thread->KernelApcDisable < 0); \
 \
    /* Enable Kernel APCs */ \
    _Thread->KernelApcDisable++; \
 \
    /* Check if Kernel APCs are now enabled */ \
    if (!(_Thread->KernelApcDisable)) \
    { \
        /* Check if we need to request an APC Delivery */ \
        if (!(IsListEmpty(&_Thread->ApcState.ApcListHead[KernelMode])) && \
            !(_Thread->SpecialApcDisable)) \
        { \
            /* Check for the right environment */ \
            KiCheckForKernelApcDelivery(); \
        } \
    } \
}
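
//
// Usage sketch (illustrative only): critical regions are the lighter-weight
// counterpart of guarded regions; they defer normal kernel APCs but still
// allow special kernel APCs to fire. KiExampleCriticalWork is a hypothetical
// routine shown purely as an example; nothing in the kernel calls it.
//
FORCEINLINE
VOID
KiExampleCriticalWork(VOID)
{
    /* Defer normal kernel APCs around the critical section */
    KeEnterCriticalRegion();

    /* ... acquire a resource or walk a list that APC code also touches ... */

    /* Re-enable kernel APCs; pending ones may be delivered here */
    KeLeaveCriticalRegion();
}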

#ifndef CONFIG_SMP
//
// Spinlock Acquire at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
    UNREFERENCED_PARAMETER(SpinLock);
}

//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
    /* On UP builds, spinlocks don't exist at IRQL >= DISPATCH */
    UNREFERENCED_PARAMETER(SpinLock);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
VOID
FORCEINLINE
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    UNREFERENCED_PARAMETER(Object);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
VOID
FORCEINLINE
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    UNREFERENCED_PARAMETER(Object);
}

KIRQL
FORCEINLINE
KiAcquireDispatcherLock(VOID)
{
    /* Raise to DPC level */
    return KeRaiseIrqlToDpcLevel();
}

VOID
FORCEINLINE
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* Just exit the dispatcher */
    KiExitDispatcher(OldIrql);
}

VOID
FORCEINLINE
KiAcquireDispatcherLockAtDpcLevel(VOID)
{
    /* This is a no-op at DPC Level for UP systems */
    return;
}

VOID
FORCEINLINE
KiReleaseDispatcherLockFromDpcLevel(VOID)
{
    /* This is a no-op at DPC Level for UP systems */
    return;
}

//
// This routine makes the thread deferred ready on the boot CPU.
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    /* Set the thread to deferred state and boot CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = 0;

    /* Make the thread ready immediately */
    KiDeferredReadyThread(Thread);
}

FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* This is meaningless on UP systems */
    UNREFERENCED_PARAMETER(NewThread);
    UNREFERENCED_PARAMETER(Cpu);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    UNREFERENCED_PARAMETER(Prcb);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    UNREFERENCED_PARAMETER(Prcb);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
}

//
// This routine protects against multiple CPU acquires, it's meaningless on UP.
//
FORCEINLINE
BOOLEAN
KiTryThreadLock(IN PKTHREAD Thread)
{
    UNREFERENCED_PARAMETER(Thread);
    return FALSE;
}

FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* There are no deferred ready lists on UP systems */
    UNREFERENCED_PARAMETER(Prcb);
}

FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
#if defined(_M_IX86) || defined(_M_AMD64)
    /* Check if this is the NPX Thread */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it */
        KeGetCurrentPrcb()->NpxThread = NULL;
        KeArchFnInit();
    }
#endif
}

FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
                      IN UCHAR Processor)
{
    /* We deliver instantly on UP */
    UNREFERENCED_PARAMETER(NeedApc);
    UNREFERENCED_PARAMETER(Processor);
}

FORCEINLINE
PKSPIN_LOCK_QUEUE
KiAcquireTimerLock(IN ULONG Hand)
{
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Nothing to do on UP */
    UNREFERENCED_PARAMETER(Hand);
    return NULL;
}

FORCEINLINE
VOID
KiReleaseTimerLock(IN PKSPIN_LOCK_QUEUE LockQueue)
{
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Nothing to do on UP */
    UNREFERENCED_PARAMETER(LockQueue);
}

#else

//
// Spinlock Acquisition at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxAcquireSpinLock(IN PKSPIN_LOCK SpinLock)
{
    for (;;)
    {
        /* Try to acquire it */
        if (InterlockedBitTestAndSet((PLONG)SpinLock, 0))
        {
            /* Someone else owns it; spin until it's released */
            while (*(volatile KSPIN_LOCK *)SpinLock == 1)
            {
#ifdef DBG
                /* On debug builds, we use a much slower but useful routine */
                Kii386SpinOnSpinLock(SpinLock, 5);
#else
                /* Otherwise, just yield and keep looping */
                YieldProcessor();
#endif
            }
        }
        else
        {
#ifdef DBG
            /* On debug builds, we OR in the KTHREAD */
            *SpinLock = (KSPIN_LOCK)KeGetCurrentThread() | 1;
#endif
            /* All is well, break out */
            break;
        }
    }
}

//
// Spinlock Release at IRQL >= DISPATCH_LEVEL
//
FORCEINLINE
VOID
KxReleaseSpinLock(IN PKSPIN_LOCK SpinLock)
{
#ifdef DBG
    /* Make sure that the threads match */
    if (((KSPIN_LOCK)KeGetCurrentThread() | 1) != *SpinLock)
    {
        /* They don't, bugcheck */
        KeBugCheckEx(SPIN_LOCK_NOT_OWNED, (ULONG_PTR)SpinLock, 0, 0, 0);
    }
#endif
    /* Clear the lock */
    InterlockedAnd((PLONG)SpinLock, 0);
}

VOID
FORCEINLINE
KiAcquireDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    LONG OldValue, NewValue;

    /* Make sure we're at a safe level to touch the lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    do
    {
        /* Loop until the other CPU releases it */
        while ((UCHAR)Object->Lock & KOBJECT_LOCK_BIT)
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        }

        /* Snapshot the unlocked value and try acquiring the lock now */
        OldValue = Object->Lock & ~KOBJECT_LOCK_BIT;
        NewValue = InterlockedCompareExchange(&Object->Lock,
                                              OldValue | KOBJECT_LOCK_BIT,
                                              OldValue);
    } while (NewValue != OldValue);
}

VOID
FORCEINLINE
KiReleaseDispatcherObject(IN DISPATCHER_HEADER* Object)
{
    /* Make sure we're at a safe level to touch the lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Release it */
    InterlockedAnd(&Object->Lock, ~KOBJECT_LOCK_BIT);
}

KIRQL
FORCEINLINE
KiAcquireDispatcherLock(VOID)
{
    /* Raise to synchronization level and acquire the dispatcher lock */
    return KeAcquireQueuedSpinLockRaiseToSynch(LockQueueDispatcherLock);
}

VOID
FORCEINLINE
KiReleaseDispatcherLock(IN KIRQL OldIrql)
{
    /* First release the lock */
    KeReleaseQueuedSpinLockFromDpcLevel(&KeGetCurrentPrcb()->
                                        LockQueue[LockQueueDispatcherLock]);

    /* Then exit the dispatcher */
    KiExitDispatcher(OldIrql);
}

//
// This routine inserts a thread into the deferred ready list of the given CPU
//
FORCEINLINE
VOID
KiInsertDeferredReadyList(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Set the thread to deferred state and CPU */
    Thread->State = DeferredReady;
    Thread->DeferredProcessor = Prcb->Number;

    /* Add it on the list */
    PushEntryList(&Prcb->DeferredReadyListHead, &Thread->SwapListEntry);
}

FORCEINLINE
VOID
KiRescheduleThread(IN BOOLEAN NewThread,
                   IN ULONG Cpu)
{
    /* Check if a new thread needs to be scheduled on a different CPU */
    if ((NewThread) && !(KeGetPcr()->Number == Cpu))
    {
        /* Send an IPI to request delivery */
        KiIpiSendRequest(AFFINITY_MASK(Cpu), IPI_DPC);
    }
}

//
// This routine sets the current thread in a swap busy state, which ensures
// that nobody else tries to swap it concurrently.
//
FORCEINLINE
VOID
KiSetThreadSwapBusy(IN PKTHREAD Thread)
{
    /* Make sure nobody already set it */
    ASSERT(Thread->SwapBusy == FALSE);

    /* Set it ourselves */
    Thread->SwapBusy = TRUE;
}

//
// This routine acquires the PRCB lock so that only one caller can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquirePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure we're at a safe level to touch the PRCB lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange(&Prcb->PrcbLock, 1)) break;

        /* Loop until the other CPU releases it */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Prcb->PrcbLock);
    }
}

//
// This routine releases the PRCB lock so that other callers can touch
// volatile PRCB data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleasePrcbLock(IN PKPRCB Prcb)
{
    /* Make sure it's acquired! */
    ASSERT(Prcb->PrcbLock != 0);

    /* Release it */
    InterlockedAnd(&Prcb->PrcbLock, 0);
}

//
// This routine acquires the thread lock so that only one caller can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiAcquireThreadLock(IN PKTHREAD Thread)
{
    /* Make sure we're at a safe level to touch the thread lock */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Start acquire loop */
    for (;;)
    {
        /* Acquire the lock and break out if we acquired it first */
        if (!InterlockedExchange(&Thread->ThreadLock, 1)) break;

        /* Loop until the other CPU releases it */
        do
        {
            /* Let the CPU know that this is a loop */
            YieldProcessor();
        } while (Thread->ThreadLock);
    }
}

//
// This routine releases the thread lock so that other callers can touch
// volatile thread data.
//
// Since this is a simple optimized spin-lock, it must only be acquired
// at dispatcher level or higher!
//
FORCEINLINE
VOID
KiReleaseThreadLock(IN PKTHREAD Thread)
{
    /* Release it */
    InterlockedAnd(&Thread->ThreadLock, 0);
}

FORCEINLINE
BOOLEAN
KiTryThreadLock(IN PKTHREAD Thread)
{
    LONG Value;

    /* If the lock isn't acquired, return false */
    if (!Thread->ThreadLock) return FALSE;

    /* Otherwise, try to acquire it and check the result */
    Value = 1;
    Value = InterlockedExchange(&Thread->ThreadLock, Value);

    /* Return the lock state */
    return (Value == TRUE);
}

FORCEINLINE
VOID
KiCheckDeferredReadyList(IN PKPRCB Prcb)
{
    /* Scan the deferred ready lists if required */
    if (Prcb->DeferredReadyListHead.Next) KiProcessDeferredReadyList(Prcb);
}

FORCEINLINE
VOID
KiRequestApcInterrupt(IN BOOLEAN NeedApc,
                      IN UCHAR Processor)
{
    /* Check if we need to request APC delivery */
    if (NeedApc)
    {
        /* Check if it's on another CPU */
        if (KeGetPcr()->Number != Processor)
        {
            /* Send an IPI to request delivery */
            KiIpiSendRequest(AFFINITY_MASK(Processor), IPI_APC);
        }
        else
        {
            /* Request a software interrupt */
            HalRequestSoftwareInterrupt(APC_LEVEL);
        }
    }
}

#endif
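
//
// Usage sketch (illustrative only): KxAcquireSpinLock and KxReleaseSpinLock
// implement only the lock ownership itself and assume the caller has already
// raised to DISPATCH_LEVEL or above (on UP builds they compile away).
// KiExampleLockAtDpcLevel is a hypothetical routine shown purely as an
// example; nothing in the kernel calls it.
//
FORCEINLINE
VOID
KiExampleLockAtDpcLevel(IN PKSPIN_LOCK SpinLock)
{
    /* The caller must already be at DISPATCH_LEVEL or higher */
    ASSERT(KeGetCurrentIrql() >= DISPATCH_LEVEL);

    /* Take the lock, do the protected work, then release it */
    KxAcquireSpinLock(SpinLock);

    /* ... touch the data protected by SpinLock ... */

    KxReleaseSpinLock(SpinLock);
}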

FORCEINLINE
VOID
KiAcquireApcLock(IN PKTHREAD Thread,
                 IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiAcquireApcLockAtDpcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock */
    KeAcquireInStackQueuedSpinLockAtDpcLevel(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiAcquireApcLockAtApcLevel(IN PKTHREAD Thread,
                           IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock */
    KeAcquireInStackQueuedSpinLock(&Thread->ApcQueueLock, Handle);
}

FORCEINLINE
VOID
KiReleaseApcLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLock(Handle);
}

FORCEINLINE
VOID
KiReleaseApcLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}

FORCEINLINE
VOID
KiAcquireProcessLock(IN PKPROCESS Process,
                     IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Acquire the lock and raise to synchronization level */
    KeAcquireInStackQueuedSpinLockRaiseToSynch(&Process->ProcessLock, Handle);
}

FORCEINLINE
VOID
KiReleaseProcessLock(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLock(Handle);
}

FORCEINLINE
VOID
KiReleaseProcessLockFromDpcLevel(IN PKLOCK_QUEUE_HANDLE Handle)
{
    /* Release the lock */
    KeReleaseInStackQueuedSpinLockFromDpcLevel(Handle);
}
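
//
// Usage sketch (illustrative only): the APC and process lock helpers above
// wrap in-stack queued spinlocks; the KLOCK_QUEUE_HANDLE lives on the
// caller's stack and must be handed back to the matching release routine.
// KiExampleInspectApcQueues is a hypothetical routine shown purely as an
// example; nothing in the kernel calls it.
//
FORCEINLINE
VOID
KiExampleInspectApcQueues(IN PKTHREAD Thread)
{
    KLOCK_QUEUE_HANDLE ApcLock;

    /* Raise to synchronization level and take the thread's APC queue lock */
    KiAcquireApcLock(Thread, &ApcLock);

    /* ... walk Thread->ApcState.ApcListHead[] while the queues are stable ... */

    /* Drop the lock and return to the original IRQL */
    KiReleaseApcLock(&ApcLock);
}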

FORCEINLINE
VOID
KiAcquireDeviceQueueLock(IN PKDEVICE_QUEUE DeviceQueue,
                         IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Lock the Queue, we're not at DPC level */
        KeAcquireInStackQueuedSpinLock(&DeviceQueue->Lock, DeviceLock);
    }
    else
    {
        /* We must be at DPC level, acquire the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeAcquireInStackQueuedSpinLockAtDpcLevel(&DeviceQueue->Lock,
                                                 DeviceLock);
    }
}

FORCEINLINE
VOID
KiReleaseDeviceQueueLock(IN PKLOCK_QUEUE_HANDLE DeviceLock)
{
    /* Check if we were called from a threaded DPC */
    if (KeGetCurrentPrcb()->DpcThreadActive)
    {
        /* Unlock the Queue, we're not at DPC level */
        KeReleaseInStackQueuedSpinLock(DeviceLock);
    }
    else
    {
        /* We must be at DPC level, release the lock safely */
        ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL);
        KeReleaseInStackQueuedSpinLockFromDpcLevel(DeviceLock);
    }
}
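
//
// Usage sketch (illustrative only): the device queue helpers above let the
// same code run both from a regular DPC (already at DISPATCH_LEVEL) and from
// a threaded DPC (at PASSIVE_LEVEL), picking the matching spinlock variant.
// KiExampleTouchDeviceQueue is a hypothetical routine shown purely as an
// example; nothing in the kernel calls it.
//
FORCEINLINE
VOID
KiExampleTouchDeviceQueue(IN PKDEVICE_QUEUE DeviceQueue)
{
    KLOCK_QUEUE_HANDLE DeviceLock;

    /* Lock the device queue in whatever way matches the current context */
    KiAcquireDeviceQueueLock(DeviceQueue, &DeviceLock);

    /* ... examine or update the device queue here ... */

    /* Undo exactly what the acquire did */
    KiReleaseDeviceQueueLock(&DeviceLock);
}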

//
// Satisfies the wait of any dispatcher object
//
#define KiSatisfyObjectWait(Object, Thread) \
{ \
    /* Special case for Mutants */ \
    if ((Object)->Header.Type == MutantObject) \
    { \
        /* Decrease the Signal State */ \
        (Object)->Header.SignalState--; \
 \
        /* Check if it's now non-signaled */ \
        if (!(Object)->Header.SignalState) \
        { \
            /* Set the Owner Thread */ \
            (Object)->OwnerThread = Thread; \
 \
            /* Disable APCs if needed */ \
            Thread->KernelApcDisable = Thread->KernelApcDisable - \
                                       (Object)->ApcDisable; \
 \
            /* Check if it's abandoned */ \
            if ((Object)->Abandoned) \
            { \
                /* Unabandon it */ \
                (Object)->Abandoned = FALSE; \
 \
                /* Return Status */ \
                Thread->WaitStatus = STATUS_ABANDONED; \
            } \
 \
            /* Insert it into the Mutant List */ \
            InsertHeadList(Thread->MutantListHead.Blink, \
                           &(Object)->MutantListEntry); \
        } \
    } \
    else if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
             EventSynchronizationObject) \
    { \
        /* Synchronization Timers and Events just get un-signaled */ \
        (Object)->Header.SignalState = 0; \
    } \
    else if ((Object)->Header.Type == SemaphoreObject) \
    { \
        /* These ones can have multiple states, so we only decrease it */ \
        (Object)->Header.SignalState--; \
    } \
}

//
// Satisfies the wait of a mutant dispatcher object
//
#define KiSatisfyMutantWait(Object, Thread) \
{ \
    /* Decrease the Signal State */ \
    (Object)->Header.SignalState--; \
 \
    /* Check if it's now non-signaled */ \
    if (!(Object)->Header.SignalState) \
    { \
        /* Set the Owner Thread */ \
        (Object)->OwnerThread = Thread; \
 \
        /* Disable APCs if needed */ \
        Thread->KernelApcDisable = Thread->KernelApcDisable - \
                                   (Object)->ApcDisable; \
 \
        /* Check if it's abandoned */ \
        if ((Object)->Abandoned) \
        { \
            /* Unabandon it */ \
            (Object)->Abandoned = FALSE; \
 \
            /* Return Status */ \
            Thread->WaitStatus = STATUS_ABANDONED; \
        } \
 \
        /* Insert it into the Mutant List */ \
        InsertHeadList(Thread->MutantListHead.Blink, \
                       &(Object)->MutantListEntry); \
    } \
}

//
// Satisfies the wait of any nonmutant dispatcher object
//
#define KiSatisfyNonMutantWait(Object) \
{ \
    if (((Object)->Header.Type & TIMER_OR_EVENT_TYPE) == \
        EventSynchronizationObject) \
    { \
        /* Synchronization Timers and Events just get un-signaled */ \
        (Object)->Header.SignalState = 0; \
    } \
    else if ((Object)->Header.Type == SemaphoreObject) \
    { \
        /* These ones can have multiple states, so we only decrease it */ \
        (Object)->Header.SignalState--; \
    } \
}

//
// Recalculates the due time
//
PLARGE_INTEGER
FORCEINLINE
KiRecalculateDueTime(IN PLARGE_INTEGER OriginalDueTime,
                     IN PLARGE_INTEGER DueTime,
                     IN OUT PLARGE_INTEGER NewDueTime)
{
    /* Don't do anything for absolute waits */
    if (OriginalDueTime->QuadPart >= 0) return OriginalDueTime;

    /* Otherwise, query the interrupt time and recalculate */
    NewDueTime->QuadPart = KeQueryInterruptTime();
    NewDueTime->QuadPart -= DueTime->QuadPart;
    return NewDueTime;
}
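
//
// Usage sketch (illustrative only): when a relative wait is interrupted (for
// example by an APC) and has to be restarted, the remaining due time is
// recomputed from the timer's recorded absolute due time; absolute waits come
// back unchanged. KiExampleRearmWait is a hypothetical routine shown purely
// as an example; nothing in the kernel calls it.
//
FORCEINLINE
PLARGE_INTEGER
KiExampleRearmWait(IN PLARGE_INTEGER OriginalDueTime,
                   IN PKTIMER Timer,
                   OUT PLARGE_INTEGER NewDueTime)
{
    LARGE_INTEGER SavedDueTime;

    /* Remember when the (previously inserted) timer was due to expire */
    SavedDueTime.QuadPart = Timer->DueTime.QuadPart;

    /* A negative original due time is relative: compute what is left of it */
    return KiRecalculateDueTime(OriginalDueTime, &SavedDueTime, NewDueTime);
}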

//
// Determines whether a thread should be added to the wait list
//
FORCEINLINE
BOOLEAN
KiCheckThreadStackSwap(IN PKTHREAD Thread,
                       IN KPROCESSOR_MODE WaitMode)
{
    /* Check the required conditions */
    if ((WaitMode != KernelMode) &&
        (Thread->EnableStackSwap) &&
        (Thread->Priority >= (LOW_REALTIME_PRIORITY + 9)))
    {
        /* We are go for swap */
        return TRUE;
    }
    else
    {
        /* Don't swap the thread */
        return FALSE;
    }
}

//
// Adds a thread to the wait list
//
#define KiAddThreadToWaitList(Thread, Swappable) \
{ \
    /* Make sure it's swappable */ \
    if (Swappable) \
    { \
        /* Insert it into the PRCB's List */ \
        InsertTailList(&KeGetCurrentPrcb()->WaitListHead, \
                       &Thread->WaitListEntry); \
    } \
}

//
// Checks if a wait in progress should be interrupted by APCs or an alertable
// state.
//
FORCEINLINE
NTSTATUS
KiCheckAlertability(IN PKTHREAD Thread,
                    IN BOOLEAN Alertable,
                    IN KPROCESSOR_MODE WaitMode)
{
    /* Check if the wait is alertable */
    if (Alertable)
    {
        /* It is, first check if the thread is alerted in this mode */
        if (Thread->Alerted[WaitMode])
        {
            /* It is, so bail out of the wait */
            Thread->Alerted[WaitMode] = FALSE;
            return STATUS_ALERTED;
        }
        else if ((WaitMode != KernelMode) &&
                 (!IsListEmpty(&Thread->ApcState.ApcListHead[UserMode])))
        {
            /* It isn't, but this is a user wait with queued user APCs */
            Thread->ApcState.UserApcPending = TRUE;
            return STATUS_USER_APC;
        }
        else if (Thread->Alerted[KernelMode])
        {
            /* It isn't that either, but we're alerted in kernel mode */
            Thread->Alerted[KernelMode] = FALSE;
            return STATUS_ALERTED;
        }
    }
    else if ((WaitMode != KernelMode) && (Thread->ApcState.UserApcPending))
    {
        /* Not alertable, but this is a user wait with pending user APCs */
        return STATUS_USER_APC;
    }

    /* Otherwise, we're fine */
    return STATUS_WAIT_0;
}
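
//
// Usage sketch (illustrative only): a waiter runs the alertability check
// right before committing to a wait and bails out if anything other than
// STATUS_WAIT_0 comes back. KiExampleShouldBlock is a hypothetical routine
// shown purely as an example; nothing in the kernel calls it.
//
FORCEINLINE
BOOLEAN
KiExampleShouldBlock(IN PKTHREAD Thread,
                     IN BOOLEAN Alertable,
                     IN KPROCESSOR_MODE WaitMode,
                     OUT NTSTATUS *WaitStatus)
{
    /* Ask whether an alert or user APC should interrupt the wait */
    *WaitStatus = KiCheckAlertability(Thread, Alertable, WaitMode);

    /* Only block if nothing needs to interrupt the wait */
    return (*WaitStatus == STATUS_WAIT_0);
}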

//
// Called by Wait and Queue code to insert a timer for dispatching.
// Also called by KeSetTimerEx to insert a timer from the caller.
//
VOID
FORCEINLINE
KxInsertTimer(IN PKTIMER Timer,
              IN ULONG Hand)
{
    PKSPIN_LOCK_QUEUE LockQueue;

    /* Acquire the lock and release the dispatcher lock */
    LockQueue = KiAcquireTimerLock(Hand);
    KiReleaseDispatcherLockFromDpcLevel();

    /* Try to insert the timer */
    if (KiInsertTimerTable(Timer, Hand))
    {
        /* Complete it */
        KiCompleteTimer(Timer, LockQueue);
    }
    else
    {
        /* Do nothing, just release the lock */
        KiReleaseTimerLock(LockQueue);
    }
}

//
// Called from Unlink and Queue Insert Code.
// Also called by timer code when canceling an inserted timer.
// Removes a timer from its tree.
//
VOID
FORCEINLINE
KxRemoveTreeTimer(IN PKTIMER Timer)
{
    ULONG Hand = Timer->Header.Hand;
    PKSPIN_LOCK_QUEUE LockQueue;
    PKTIMER_TABLE_ENTRY TimerEntry;

    /* Acquire timer lock */
    LockQueue = KiAcquireTimerLock(Hand);

    /* Set the timer as non-inserted */
    Timer->Header.Inserted = FALSE;

    /* Remove it from the timer list */
    if (RemoveEntryList(&Timer->TimerListEntry))
    {
        /* Get the entry and check if it's empty */
        TimerEntry = &KiTimerTableListHead[Hand];
        if (IsListEmpty(&TimerEntry->Entry))
        {
            /* Clear the time then */
            TimerEntry->Time.HighPart = 0xFFFFFFFF;
        }
    }

    /* Release the timer lock */
    KiReleaseTimerLock(LockQueue);
}

VOID
FORCEINLINE
KxSetTimerForThreadWait(IN PKTIMER Timer,
                        IN LARGE_INTEGER Interval,
                        OUT PULONG Hand)
{
    ULONGLONG DueTime;
    LARGE_INTEGER InterruptTime, SystemTime, TimeDifference;

    /* Check the timer's interval to see if it's absolute */
    Timer->Header.Absolute = FALSE;
    if (Interval.HighPart >= 0)
    {
        /* Get the system time and calculate the relative time */
        KeQuerySystemTime(&SystemTime);
        TimeDifference.QuadPart = SystemTime.QuadPart - Interval.QuadPart;
        Timer->Header.Absolute = TRUE;

        /* Check if we've already expired */
        if (TimeDifference.HighPart >= 0)
        {
            /* Reset everything */
            Timer->DueTime.QuadPart = 0;
            *Hand = 0;
            Timer->Header.Hand = 0;
            return;
        }
        else
        {
            /* Update the interval */
            Interval = TimeDifference;
        }
    }

    /* Calculate the due time */
    InterruptTime.QuadPart = KeQueryInterruptTime();
    DueTime = InterruptTime.QuadPart - Interval.QuadPart;
    Timer->DueTime.QuadPart = DueTime;

    /* Calculate the timer handle */
    *Hand = KiComputeTimerTableIndex(DueTime);
    Timer->Header.Hand = (UCHAR)*Hand;
}

#define KxDelayThreadWait() \
 \
    /* Setup the Wait Block */ \
    Thread->WaitBlockList = TimerBlock; \
 \
    /* Setup the timer */ \
    KxSetTimerForThreadWait(Timer, *Interval, &Hand); \
 \
    /* Save the due time for the caller */ \
    DueTime.QuadPart = Timer->DueTime.QuadPart; \
 \
    /* Link the timer to this Wait Block */ \
    TimerBlock->NextWaitBlock = TimerBlock; \
    Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
    Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
 \
    /* Clear wait status */ \
    Thread->WaitStatus = STATUS_SUCCESS; \
 \
    /* Setup wait fields */ \
    Thread->Alertable = Alertable; \
    Thread->WaitReason = DelayExecution; \
    Thread->WaitMode = WaitMode; \
 \
    /* Check if we can swap the thread's stack */ \
    Thread->WaitListEntry.Flink = NULL; \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
 \
    /* Set the wait time */ \
    Thread->WaitTime = KeTickCount.LowPart;
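
//
// Usage sketch (illustrative only): KxDelayThreadWait (above) and the other
// Kx*Wait macros that follow expand inside their callers and rely on specific
// local names (Thread, Timer, TimerBlock, Interval/Timeout, Hand, DueTime,
// Alertable, WaitMode, WaitReason, Swappable) being in scope. The hypothetical
// routine below shows the kind of frame a caller such as
// KeDelayExecutionThread is assumed to set up; the use of the thread's
// built-in Timer and WaitBlock[TIMER_WAIT_BLOCK] mirrors what the real caller
// is assumed to do. Nothing in the kernel calls this routine.
//
FORCEINLINE
BOOLEAN
KiExampleDelayWaitFrame(IN KPROCESSOR_MODE WaitMode,
                        IN BOOLEAN Alertable,
                        IN PLARGE_INTEGER Interval)
{
    PKTHREAD Thread = KeGetCurrentThread();
    PKTIMER Timer = &Thread->Timer;
    PKWAIT_BLOCK TimerBlock = &Thread->WaitBlock[TIMER_WAIT_BLOCK];
    LARGE_INTEGER DueTime;
    ULONG Hand;
    BOOLEAN Swappable;

    /* Build the wait frame the macro expects, then expand it */
    KxDelayThreadWait();

    /* A real caller would now insert the timer and block; here we only
       report whether the stack may be swapped out while waiting */
    DBG_UNREFERENCED_LOCAL_VARIABLE(DueTime);
    DBG_UNREFERENCED_LOCAL_VARIABLE(Hand);
    return Swappable;
}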

#define KxMultiThreadWait() \
    /* Link wait block array to the thread */ \
    Thread->WaitBlockList = WaitBlockArray; \
 \
    /* Reset the index */ \
    Index = 0; \
 \
    /* Loop wait blocks */ \
    do \
    { \
        /* Fill out the wait block */ \
        WaitBlock = &WaitBlockArray[Index]; \
        WaitBlock->Object = Object[Index]; \
        WaitBlock->WaitKey = (USHORT)Index; \
        WaitBlock->WaitType = WaitType; \
        WaitBlock->Thread = Thread; \
 \
        /* Link to next block */ \
        WaitBlock->NextWaitBlock = &WaitBlockArray[Index + 1]; \
        Index++; \
    } while (Index < Count); \
 \
    /* Link the last block */ \
    WaitBlock->NextWaitBlock = WaitBlockArray; \
 \
    /* Set default wait status */ \
    Thread->WaitStatus = STATUS_WAIT_0; \
 \
    /* Check if we have a timer */ \
    if (Timeout) \
    { \
        /* Link to the block */ \
        TimerBlock->NextWaitBlock = WaitBlockArray; \
 \
        /* Setup the timer */ \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
 \
        /* Save the due time for the caller */ \
        DueTime.QuadPart = Timer->DueTime.QuadPart; \
 \
        /* Initialize the list */ \
        InitializeListHead(&Timer->Header.WaitListHead); \
    } \
 \
    /* Set wait settings */ \
    Thread->Alertable = Alertable; \
    Thread->WaitMode = WaitMode; \
    Thread->WaitReason = WaitReason; \
 \
    /* Check if we can swap the thread's stack */ \
    Thread->WaitListEntry.Flink = NULL; \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
 \
    /* Set the wait time */ \
    Thread->WaitTime = KeTickCount.LowPart;

#define KxSingleThreadWait() \
    /* Setup the Wait Block */ \
    Thread->WaitBlockList = WaitBlock; \
    WaitBlock->WaitKey = STATUS_SUCCESS; \
    WaitBlock->Object = Object; \
    WaitBlock->WaitType = WaitAny; \
 \
    /* Clear wait status */ \
    Thread->WaitStatus = STATUS_SUCCESS; \
 \
    /* Check if we have a timer */ \
    if (Timeout) \
    { \
        /* Setup the timer */ \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
 \
        /* Save the due time for the caller */ \
        DueTime.QuadPart = Timer->DueTime.QuadPart; \
 \
        /* Pointer to timer block */ \
        WaitBlock->NextWaitBlock = TimerBlock; \
        TimerBlock->NextWaitBlock = WaitBlock; \
 \
        /* Link the timer to this Wait Block */ \
        Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
        Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
    } \
    else \
    { \
        /* No timer block, just ourselves */ \
        WaitBlock->NextWaitBlock = WaitBlock; \
    } \
 \
    /* Set wait settings */ \
    Thread->Alertable = Alertable; \
    Thread->WaitMode = WaitMode; \
    Thread->WaitReason = WaitReason; \
 \
    /* Check if we can swap the thread's stack */ \
    Thread->WaitListEntry.Flink = NULL; \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
 \
    /* Set the wait time */ \
    Thread->WaitTime = KeTickCount.LowPart;

#define KxQueueThreadWait() \
    /* Setup the Wait Block */ \
    Thread->WaitBlockList = WaitBlock; \
    WaitBlock->WaitKey = STATUS_SUCCESS; \
    WaitBlock->Object = Queue; \
    WaitBlock->WaitType = WaitAny; \
    WaitBlock->Thread = Thread; \
 \
    /* Clear wait status */ \
    Thread->WaitStatus = STATUS_SUCCESS; \
 \
    /* Check if we have a timer */ \
    if (Timeout) \
    { \
        /* Setup the timer */ \
        KxSetTimerForThreadWait(Timer, *Timeout, &Hand); \
 \
        /* Save the due time for the caller */ \
        DueTime.QuadPart = Timer->DueTime.QuadPart; \
 \
        /* Pointer to timer block */ \
        WaitBlock->NextWaitBlock = TimerBlock; \
        TimerBlock->NextWaitBlock = WaitBlock; \
 \
        /* Link the timer to this Wait Block */ \
        Timer->Header.WaitListHead.Flink = &TimerBlock->WaitListEntry; \
        Timer->Header.WaitListHead.Blink = &TimerBlock->WaitListEntry; \
    } \
    else \
    { \
        /* No timer block, just ourselves */ \
        WaitBlock->NextWaitBlock = WaitBlock; \
    } \
 \
    /* Set wait settings */ \
    Thread->Alertable = FALSE; \
    Thread->WaitMode = WaitMode; \
    Thread->WaitReason = WrQueue; \
 \
    /* Check if we can swap the thread's stack */ \
    Thread->WaitListEntry.Flink = NULL; \
    Swappable = KiCheckThreadStackSwap(Thread, WaitMode); \
 \
    /* Set the wait time */ \
    Thread->WaitTime = KeTickCount.LowPart;

//
// Unwaits a Thread
//
FORCEINLINE
VOID
KxUnwaitThread(IN DISPATCHER_HEADER *Object,
               IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK WaitBlock;
    PKTHREAD WaitThread;
    ULONG WaitKey;

    /* Loop the Wait Entries */
    WaitList = &Object->WaitListHead;
    ASSERT(IsListEmpty(&Object->WaitListHead) == FALSE);
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);

        /* Get the waiting thread */
        WaitThread = WaitBlock->Thread;

        /* Check the current Wait Mode */
        if (WaitBlock->WaitType == WaitAny)
        {
            /* Use the actual wait key */
            WaitKey = WaitBlock->WaitKey;
        }
        else
        {
            /* Otherwise, use STATUS_KERNEL_APC */
            WaitKey = STATUS_KERNEL_APC;
        }

        /* Unwait the thread */
        KiUnwaitThread(WaitThread, WaitKey, Increment);

        /* Next entry */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}

//
// Unwaits a Thread waiting on an event
//
FORCEINLINE
VOID
KxUnwaitThreadForEvent(IN PKEVENT Event,
                       IN KPRIORITY Increment)
{
    PLIST_ENTRY WaitEntry, WaitList;
    PKWAIT_BLOCK WaitBlock;
    PKTHREAD WaitThread;

    /* Loop the Wait Entries */
    WaitList = &Event->Header.WaitListHead;
    ASSERT(IsListEmpty(&Event->Header.WaitListHead) == FALSE);
    WaitEntry = WaitList->Flink;
    do
    {
        /* Get the current wait block */
        WaitBlock = CONTAINING_RECORD(WaitEntry, KWAIT_BLOCK, WaitListEntry);

        /* Get the waiting thread */
        WaitThread = WaitBlock->Thread;

        /* Check the current Wait Mode */
        if (WaitBlock->WaitType == WaitAny)
        {
            /* Un-signal it */
            Event->Header.SignalState = 0;

            /* Un-signal the event and unwait the thread */
            KiUnwaitThread(WaitThread, WaitBlock->WaitKey, Increment);
            break;
        }

        /* Unwait the thread with STATUS_KERNEL_APC */
        KiUnwaitThread(WaitThread, STATUS_KERNEL_APC, Increment);

        /* Next entry */
        WaitEntry = WaitList->Flink;
    } while (WaitEntry != WaitList);
}

//
// This routine queues a ready thread on the PRCB's ready lists.
// If this thread cannot currently run on this CPU, then the thread is
// added to the deferred ready list instead.
//
// This routine must be entered with the PRCB lock held and it will exit
// with the PRCB lock released!
//
FORCEINLINE
VOID
KxQueueReadyThread(IN PKTHREAD Thread,
                   IN PKPRCB Prcb)
{
    BOOLEAN Preempted;
    KPRIORITY Priority;

    /* Sanity checks */
    ASSERT(Prcb == KeGetCurrentPrcb());
    ASSERT(Thread->State == Running);
    ASSERT(Thread->NextProcessor == Prcb->Number);

    /* Check if this thread is allowed to run on this CPU */
#ifdef CONFIG_SMP
    if ((Thread->Affinity) & (Prcb->SetMember))
#else
    if (TRUE)
#endif
    {
        /* Set thread ready for execution */
        Thread->State = Ready;

        /* Save current priority and if someone had pre-empted it */
        Priority = Thread->Priority;
        Preempted = Thread->Preempted;

        /* We're not pre-empting now, and set the wait time */
        Thread->Preempted = FALSE;
        Thread->WaitTime = KeTickCount.LowPart;

        /* Sanity check */
        ASSERT((Priority >= 0) && (Priority <= HIGH_PRIORITY));

        /* Insert this thread in the appropriate order */
        Preempted ? InsertHeadList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry) :
                    InsertTailList(&Prcb->DispatcherReadyListHead[Priority],
                                   &Thread->WaitListEntry);

        /* Update the ready summary */
        Prcb->ReadySummary |= PRIORITY_MASK(Priority);

        /* Sanity check */
        ASSERT(Priority == Thread->Priority);

        /* Release the PRCB lock */
        KiReleasePrcbLock(Prcb);
    }
    else
    {
        /* Otherwise, prepare this thread to be deferred */
        Thread->State = DeferredReady;
        Thread->DeferredProcessor = Prcb->Number;

        /* Release the lock and defer scheduling */
        KiReleasePrcbLock(Prcb);
        KiDeferredReadyThread(Thread);
    }
}
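
//
// Usage sketch (illustrative only): KxQueueReadyThread has an asymmetric
// locking contract; it must be entered with the PRCB lock held and always
// returns with that lock released. KiExampleQueueRunningThread is a
// hypothetical routine shown purely as an example; nothing in the kernel
// calls it.
//
FORCEINLINE
VOID
KiExampleQueueRunningThread(IN PKTHREAD Thread)
{
    PKPRCB Prcb = KeGetCurrentPrcb();

    /* Take the PRCB lock; the queue routine will drop it for us */
    KiAcquirePrcbLock(Prcb);

    /* The thread is expected to be Running and targeted at this CPU */
    ASSERT(Thread->State == Running);
    ASSERT(Thread->NextProcessor == Prcb->Number);

    /* Queue it; no KiReleasePrcbLock call is needed afterwards */
    KxQueueReadyThread(Thread, Prcb);
}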

//
// This routine scans for an appropriate ready thread to select at the
// given priority and for the given CPU.
//
FORCEINLINE
PKTHREAD
KiSelectReadyThread(IN KPRIORITY Priority,
                    IN PKPRCB Prcb)
{
    ULONG PrioritySet, HighPriority;
    PLIST_ENTRY ListEntry;
    PKTHREAD Thread = NULL;

    /* Save the current mask and get the priority set for the CPU */
    PrioritySet = Prcb->ReadySummary >> Priority;
    if (!PrioritySet) goto Quickie;

    /* Get the highest priority possible */
    BitScanReverse((PULONG)&HighPriority, PrioritySet);
    ASSERT((PrioritySet & PRIORITY_MASK(HighPriority)) != 0);
    HighPriority += Priority;

    /* Make sure the list isn't empty at the highest priority */
    ASSERT(IsListEmpty(&Prcb->DispatcherReadyListHead[HighPriority]) == FALSE);

    /* Get the first thread on the list */
    ListEntry = Prcb->DispatcherReadyListHead[HighPriority].Flink;
    Thread = CONTAINING_RECORD(ListEntry, KTHREAD, WaitListEntry);

    /* Make sure this thread is here for a reason */
    ASSERT(HighPriority == Thread->Priority);
    ASSERT(Thread->Affinity & AFFINITY_MASK(Prcb->Number));
    ASSERT(Thread->NextProcessor == Prcb->Number);

    /* Remove it from the list */
    if (RemoveEntryList(&Thread->WaitListEntry))
    {
        /* The list is empty now, reset the ready summary */
        Prcb->ReadySummary ^= PRIORITY_MASK(HighPriority);
    }

    /* Sanity check and return the thread */
Quickie:
    ASSERT((Thread == NULL) ||
           (Thread->BasePriority == 0) ||
           (Thread->Priority != 0));
    return Thread;
}

//
// This routine computes the new priority for a thread. It is only valid for
// threads with priorities in the dynamic priority range.
//
SCHAR
FORCEINLINE
KiComputeNewPriority(IN PKTHREAD Thread,
                     IN SCHAR Adjustment)
{
    SCHAR Priority;

    /* Priority sanity checks */
    ASSERT((Thread->PriorityDecrement >= 0) &&
           (Thread->PriorityDecrement <= Thread->Priority));
    ASSERT((Thread->Priority < LOW_REALTIME_PRIORITY) ?
           TRUE : (Thread->PriorityDecrement == 0));

    /* Get the current priority */
    Priority = Thread->Priority;
    if (Priority < LOW_REALTIME_PRIORITY)
    {
        /* Decrease priority by the priority decrement */
        Priority -= (Thread->PriorityDecrement + Adjustment);

        /* Don't go out of bounds */
        if (Priority < Thread->BasePriority) Priority = Thread->BasePriority;

        /* Reset the priority decrement */
        Thread->PriorityDecrement = 0;
    }

    /* Sanity check */
    ASSERT((Thread->BasePriority == 0) || (Priority != 0));

    /* Return the new priority */
    return Priority;
}

#ifndef _M_ARM
PRKTHREAD
FORCEINLINE
KeGetCurrentThread(VOID)
{
#ifdef _M_IX86
    /* Return the current thread */
    return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
#else
    PKPRCB Prcb = KeGetCurrentPrcb();
    return Prcb->CurrentThread;
#endif
}

UCHAR
FORCEINLINE
KeGetPreviousMode(VOID)
{
    /* Return the current mode */
    return KeGetCurrentThread()->PreviousMode;
}
#endif

VOID
FORCEINLINE
KeFlushProcessTb(VOID)
{
    /* Flush the TLB by resetting CR3 */
#ifdef _M_PPC
    __asm__("sync\n\tisync\n\t");
#elif _M_ARM
    //
    // We need to implement this!
    //
    ASSERTMSG("Need ARM flush routine\n", FALSE);
#else
    __writecr3(__readcr3());
#endif
}