#pragma once

#ifndef __ASM__

#include "intrin_i.h"

//
// Thread Dispatcher Header DebugActive Mask
//
#define DR_MASK(x)          (1 << (x))
#define DR_REG_MASK         0x4F

//
// INT3 is 1 byte long
//
#define KD_BREAKPOINT_TYPE  UCHAR
#define KD_BREAKPOINT_SIZE  sizeof(UCHAR)
#define KD_BREAKPOINT_VALUE 0xCC

//
// Macros for getting and setting special purpose registers in portable code
//
#define KeGetContextPc(Context) \
    ((Context)->Eip)

#define KeSetContextPc(Context, ProgramCounter) \
    ((Context)->Eip = (ProgramCounter))

#define KeGetTrapFramePc(TrapFrame) \
    ((TrapFrame)->Eip)

#define KiGetLinkedTrapFrame(x) \
    (PKTRAP_FRAME)((x)->Edx)

#define KeGetContextReturnRegister(Context) \
    ((Context)->Eax)

#define KeSetContextReturnRegister(Context, ReturnValue) \
    ((Context)->Eax = (ReturnValue))

//
// Macro to get trap and exception frame from a thread stack
//
#define KeGetTrapFrame(Thread) \
    (PKTRAP_FRAME)((ULONG_PTR)((Thread)->InitialStack) - \
                   sizeof(KTRAP_FRAME) - \
                   sizeof(FX_SAVE_AREA))

#define KeGetExceptionFrame(Thread) \
    NULL
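//
// Illustrative only: the stack layout these macros assume at the top of a
// thread's initial kernel stack. The FX_SAVE_AREA (NPX state) sits directly
// below InitialStack, with the KTRAP_FRAME right underneath it:
//
//   InitialStack - sizeof(FX_SAVE_AREA) - sizeof(KTRAP_FRAME)  -> KTRAP_FRAME
//   InitialStack - sizeof(FX_SAVE_AREA)                        -> FX_SAVE_AREA
//   InitialStack                                               -> initial stack top
//
// KiGetThreadNpxArea() further below relies on the same layout.
//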
//
// Macro to get context switches from the PRCB
// All architectures but x86 have it in the PRCB's KeContextSwitches
//
#define KeGetContextSwitches(Prcb) \
    CONTAINING_RECORD(Prcb, KIPCR, PrcbData)->ContextSwitches

//
// Macro to get the second-level cache size. The field name differs between
// CISC and RISC architectures, as the former has a unified I/D cache.
//
#define KiGetSecondLevelDCacheSize() ((PKIPCR)KeGetPcr())->SecondLevelCacheSize

//
// Returns the Interrupt State from a Trap Frame.
// ON = TRUE, OFF = FALSE
//
#define KeGetTrapFrameInterruptState(TrapFrame) \
    BooleanFlagOn((TrapFrame)->EFlags, EFLAGS_INTERRUPT_MASK)

//
// Flags for exiting a trap
//
#define KTE_SKIP_PM_BIT  (((KTRAP_EXIT_SKIP_BITS) { { .SkipPreviousMode = TRUE } }).Bits)
#define KTE_SKIP_SEG_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipSegments = TRUE } }).Bits)
#define KTE_SKIP_VOL_BIT (((KTRAP_EXIT_SKIP_BITS) { { .SkipVolatiles = TRUE } }).Bits)

typedef union _KTRAP_EXIT_SKIP_BITS
{
    struct
    {
        UCHAR SkipPreviousMode:1;
        UCHAR SkipSegments:1;
        UCHAR SkipVolatiles:1;
        UCHAR Reserved:5;
    };
    UCHAR Bits;
} KTRAP_EXIT_SKIP_BITS, *PKTRAP_EXIT_SKIP_BITS;

//
// Flags used by the VDM/V8086 emulation engine for determining instruction prefixes
//
#define PFX_FLAG_ES      0x00000100
#define PFX_FLAG_CS      0x00000200
#define PFX_FLAG_SS      0x00000400
#define PFX_FLAG_DS      0x00000800
#define PFX_FLAG_FS      0x00001000
#define PFX_FLAG_GS      0x00002000
#define PFX_FLAG_OPER32  0x00004000
#define PFX_FLAG_ADDR32  0x00008000
#define PFX_FLAG_LOCK    0x00010000
#define PFX_FLAG_REPNE   0x00020000
#define PFX_FLAG_REP     0x00040000
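//
// Illustrative only, for reference: the x86 prefix bytes these flags
// correspond to (the mapping is performed by the opcode emulation code,
// not by this header):
//
//   0x26 -> PFX_FLAG_ES      0x2E -> PFX_FLAG_CS      0x36 -> PFX_FLAG_SS
//   0x3E -> PFX_FLAG_DS      0x64 -> PFX_FLAG_FS      0x65 -> PFX_FLAG_GS
//   0x66 -> PFX_FLAG_OPER32  0x67 -> PFX_FLAG_ADDR32  0xF0 -> PFX_FLAG_LOCK
//   0xF2 -> PFX_FLAG_REPNE   0xF3 -> PFX_FLAG_REP
//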
//
// VDM Helper Macros
//
// All VDM/V8086 opcode emulators share the same FASTCALL function signature.
// We keep 2 parameters where the original ASM implementation uses 4:
// TrapFrame, PrefixFlags, Eip, InstructionSize.
//
// The trap frame and the prefix flags are passed in our two parameters.
//
// Since the smallest prefix flag is 0x100, the low byte is free for a count of
// up to 0xFF, so the instruction size is ORed into the prefix flags.
//
// EIP is always available from the trap frame; to get the *current instruction*
// EIP we only need to add the instruction size *MINUS ONE*, so this value does
// not have to be pushed on the stack as a separate parameter.
//
// The *current instruction* EIP is only needed in one place, so although it is
// slightly more expensive to recalculate it there, this avoids redefining ALL
// opcode handlers to take 3 parameters, which would force stack usage in every
// other scenario.
//
#define KiVdmSetVdmEFlags(x)        InterlockedOr((PLONG)KiNtVdmState, (x));
#define KiVdmClearVdmEFlags(x)      InterlockedAnd((PLONG)KiNtVdmState, ~(x))
#define KiCallVdmHandler(x)         KiVdmOpcode##x(TrapFrame, Flags)
#define KiCallVdmPrefixHandler(x)   KiVdmOpcodePrefix(TrapFrame, Flags | x)
#define KiVdmUnhandledOpcode(x)                     \
    BOOLEAN                                         \
    FASTCALL                                        \
    KiVdmOpcode##x(IN PKTRAP_FRAME TrapFrame,       \
                   IN ULONG Flags)                  \
    {                                               \
        /* Not yet handled */                       \
        UNIMPLEMENTED_DBGBREAK();                   \
        return FALSE;                               \
    }
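//
// Illustrative only: with the packing scheme described above, a call such as
//
//   KiCallVdmPrefixHandler(PFX_FLAG_OPER32);
//
// invokes KiVdmOpcodePrefix() with PFX_FLAG_OPER32 ORed into Flags, while the
// accumulated instruction size stays in the low byte (Flags & 0xFF), since
// every prefix flag is >= 0x100.
//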
C_ASSERT(NPX_FRAME_LENGTH == sizeof(FX_SAVE_AREA));

//
// Local parameters
//
typedef struct _KV86_FRAME
{
    PVOID ThreadStack;
    PVOID ThreadTeb;
    PVOID PcrTeb;
} KV86_FRAME, *PKV86_FRAME;

//
// Virtual Stack Frame
//
typedef struct _KV8086_STACK_FRAME
{
    KTRAP_FRAME TrapFrame;
    FX_SAVE_AREA NpxArea;
    KV86_FRAME V86Frame;
} KV8086_STACK_FRAME, *PKV8086_STACK_FRAME;

//
// Large Pages Support
//
typedef struct _LARGE_IDENTITY_MAP
{
    PHARDWARE_PTE TopLevelDirectory;
    ULONG Cr3;
    ULONG_PTR StartAddress;
    ULONG PagesCount;
    PVOID PagesList[30];
} LARGE_IDENTITY_MAP, *PLARGE_IDENTITY_MAP;

/* Disable interrupts and return whether they were enabled before */
FORCEINLINE
BOOLEAN
KeDisableInterrupts(VOID)
{
    ULONG Flags;
    BOOLEAN Return;

    /* Get EFLAGS and check if the interrupt bit is set */
    Flags = __readeflags();
    Return = (Flags & EFLAGS_INTERRUPT_MASK) ? TRUE : FALSE;

    /* Disable interrupts */
    _disable();
    return Return;
}

/* Restore previous interrupt state */
FORCEINLINE
VOID
KeRestoreInterrupts(BOOLEAN WereEnabled)
{
    if (WereEnabled) _enable();
}

//
// Registers an interrupt handler with an IDT vector
//
FORCEINLINE
VOID
KeRegisterInterruptHandler(IN ULONG Vector,
                           IN PVOID Handler)
{
    UCHAR Entry;
    ULONG_PTR Address;
    PKIPCR Pcr = (PKIPCR)KeGetPcr();

    //
    // Get the entry from the HAL
    //
    Entry = HalVectorToIDTEntry(Vector);
    Address = PtrToUlong(Handler);

    //
    // Now set the data
    //
    Pcr->IDT[Entry].ExtendedOffset = (USHORT)(Address >> 16);
    Pcr->IDT[Entry].Offset = (USHORT)Address;
}

//
// Returns the registered interrupt handler for a given IDT vector
//
FORCEINLINE
PVOID
KeQueryInterruptHandler(IN ULONG Vector)
{
    PKIPCR Pcr = (PKIPCR)KeGetPcr();
    UCHAR Entry;

    //
    // Get the entry from the HAL
    //
    Entry = HalVectorToIDTEntry(Vector);

    //
    // Read the entry from the IDT
    //
    return (PVOID)(((Pcr->IDT[Entry].ExtendedOffset << 16) & 0xFFFF0000) |
                    (Pcr->IDT[Entry].Offset & 0xFFFF));
}
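//
// Illustrative only: an IDT gate stores its 32-bit handler address split
// across two 16-bit fields. For a hypothetical handler at 0x80123456,
// KeRegisterInterruptHandler() stores:
//
//   ExtendedOffset = 0x8012   (Address >> 16)
//   Offset         = 0x3456   ((USHORT)Address)
//
// and KeQueryInterruptHandler() reassembles the same address from those fields.
//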
//
// Invalidates the TLB entry for a specified address
//
FORCEINLINE
VOID
KeInvalidateTlbEntry(IN PVOID Address)
{
    /* Invalidate the TLB entry for this address */
    __invlpg(Address);
}

FORCEINLINE
VOID
KeFlushProcessTb(VOID)
{
    /* Flush the TLB by resetting CR3 */
    __writecr3(__readcr3());
}

FORCEINLINE
VOID
KeSweepICache(IN PVOID BaseAddress,
              IN SIZE_T FlushSize)
{
    //
    // Always sweep the whole cache
    //
    UNREFERENCED_PARAMETER(BaseAddress);
    UNREFERENCED_PARAMETER(FlushSize);
    __wbinvd();
}

FORCEINLINE
PRKTHREAD
KeGetCurrentThread(VOID)
{
    /* Return the current thread */
    return ((PKIPCR)KeGetPcr())->PrcbData.CurrentThread;
}

FORCEINLINE
VOID
KiRundownThread(IN PKTHREAD Thread)
{
#ifndef CONFIG_SMP
    /* Check if this is the NPX Thread */
    if (KeGetCurrentPrcb()->NpxThread == Thread)
    {
        /* Clear it */
        KeGetCurrentPrcb()->NpxThread = NULL;
        Ke386FnInit();
    }
#else
    /* Nothing to do */
#endif
}

FORCEINLINE
VOID
Ke386SetGdtEntryBase(PKGDTENTRY GdtEntry, PVOID BaseAddress)
{
    GdtEntry->BaseLow = (USHORT)((ULONG_PTR)BaseAddress & 0xFFFF);
    GdtEntry->HighWord.Bytes.BaseMid = (UCHAR)((ULONG_PTR)BaseAddress >> 16);
    GdtEntry->HighWord.Bytes.BaseHi = (UCHAR)((ULONG_PTR)BaseAddress >> 24);
}

FORCEINLINE
VOID
KiSetTebBase(PKPCR Pcr, PVOID TebAddress)
{
    Pcr->NtTib.Self = TebAddress;
    Ke386SetGdtEntryBase(&Pcr->GDT[KGDT_R3_TEB / sizeof(KGDTENTRY)], TebAddress);
}
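//
// Illustrative only: Ke386SetGdtEntryBase() scatters the 32-bit base across
// the descriptor fields. For a hypothetical TEB at 0x7FFDE000 it stores:
//
//   BaseLow = 0xE000, BaseMid = 0xFD, BaseHi = 0x7F
//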
INIT_FUNCTION
VOID
FASTCALL
Ki386InitializeTss(
    IN PKTSS Tss,
    IN PKIDTENTRY Idt,
    IN PKGDTENTRY Gdt
);

INIT_FUNCTION
VOID
NTAPI
KiSetCR0Bits(VOID);

INIT_FUNCTION
VOID
NTAPI
KiGetCacheInformation(VOID);

INIT_FUNCTION
BOOLEAN
NTAPI
KiIsNpxPresent(
    VOID
);

INIT_FUNCTION
BOOLEAN
NTAPI
KiIsNpxErrataPresent(
    VOID
);

INIT_FUNCTION
VOID
NTAPI
KiSetProcessorType(VOID);

INIT_FUNCTION
ULONG
NTAPI
KiGetFeatureBits(VOID);

VOID
NTAPI
KiThreadStartup(VOID);

NTSTATUS
NTAPI
Ke386GetGdtEntryThread(
    IN PKTHREAD Thread,
    IN ULONG Offset,
    IN PKGDTENTRY Descriptor
);

VOID
NTAPI
KiFlushNPXState(
    IN FLOATING_SAVE_AREA *SaveArea
);

VOID
NTAPI
Ki386AdjustEsp0(
    IN PKTRAP_FRAME TrapFrame
);

VOID
NTAPI
Ki386SetupAndExitToV86Mode(
    OUT PTEB VdmTeb
);

INIT_FUNCTION
VOID
NTAPI
KeI386VdmInitialize(
    VOID
);

INIT_FUNCTION
ULONG_PTR
NTAPI
Ki386EnableGlobalPage(
    IN ULONG_PTR Context
);

INIT_FUNCTION
ULONG_PTR
NTAPI
Ki386EnableTargetLargePage(
    IN ULONG_PTR Context
);

BOOLEAN
NTAPI
Ki386CreateIdentityMap(
    IN PLARGE_IDENTITY_MAP IdentityMap,
    IN PVOID StartPtr,
    IN ULONG Length
);

VOID
NTAPI
Ki386FreeIdentityMap(
    IN PLARGE_IDENTITY_MAP IdentityMap
);

VOID
NTAPI
Ki386EnableCurrentLargePage(
    IN ULONG_PTR StartAddress,
    IN ULONG Cr3
);

INIT_FUNCTION
VOID
NTAPI
KiI386PentiumLockErrataFixup(
    VOID
);

INIT_FUNCTION
VOID
NTAPI
KiInitializePAT(
    VOID
);

INIT_FUNCTION
VOID
NTAPI
KiInitializeMTRR(
    IN BOOLEAN FinalCpu
);

INIT_FUNCTION
VOID
NTAPI
KiAmdK6InitializeMTRR(
    VOID
);

INIT_FUNCTION
VOID
NTAPI
KiRestoreFastSyscallReturnState(
    VOID
);

INIT_FUNCTION
ULONG_PTR
NTAPI
Ki386EnableDE(
    IN ULONG_PTR Context
);

INIT_FUNCTION
ULONG_PTR
NTAPI
Ki386EnableFxsr(
    IN ULONG_PTR Context
);

INIT_FUNCTION
ULONG_PTR
NTAPI
Ki386EnableXMMIExceptions(
    IN ULONG_PTR Context
);

BOOLEAN
NTAPI
VdmDispatchBop(
    IN PKTRAP_FRAME TrapFrame
);

BOOLEAN
NTAPI
VdmDispatchPageFault(
    _In_ PKTRAP_FRAME TrapFrame
);

BOOLEAN
FASTCALL
KiVdmOpcodePrefix(
    IN PKTRAP_FRAME TrapFrame,
    IN ULONG Flags
);

BOOLEAN
FASTCALL
Ki386HandleOpcodeV86(
    IN PKTRAP_FRAME TrapFrame
);

DECLSPEC_NORETURN
VOID
FASTCALL
KiEoiHelper(
    IN PKTRAP_FRAME TrapFrame
);

VOID
FASTCALL
Ki386BiosCallReturnAddress(
    IN PKTRAP_FRAME TrapFrame
);

ULONG_PTR
FASTCALL
KiExitV86Mode(
    IN PKTRAP_FRAME TrapFrame
);

DECLSPEC_NORETURN
VOID
NTAPI
KiDispatchExceptionFromTrapFrame(
    IN NTSTATUS Code,
    IN ULONG Flags,
    IN ULONG_PTR Address,
    IN ULONG ParameterCount,
    IN ULONG_PTR Parameter1,
    IN ULONG_PTR Parameter2,
    IN ULONG_PTR Parameter3,
    IN PKTRAP_FRAME TrapFrame
);

NTSTATUS
NTAPI
KiConvertToGuiThread(
    VOID
);

//
// Global x86 only Kernel data
//
extern PVOID Ki386IopmSaveArea;
extern ULONG KeI386EFlagsAndMaskV86;
extern ULONG KeI386EFlagsOrMaskV86;
extern BOOLEAN KeI386VirtualIntExtensions;
extern KIDTENTRY KiIdt[MAXIMUM_IDTVECTOR+1];
extern KDESCRIPTOR KiIdtDescriptor;
extern BOOLEAN KiI386PentiumLockErrataPresent;
extern ULONG KeI386NpxPresent;
extern ULONG KeI386XMMIPresent;
extern ULONG KeI386FxsrPresent;
extern ULONG KiMXCsrMask;
extern ULONG KeI386CpuType;
extern ULONG KeI386CpuStep;
extern ULONG KiFastSystemCallDisable;
extern UCHAR KiDebugRegisterTrapOffsets[9];
extern UCHAR KiDebugRegisterContextOffsets[9];
extern VOID __cdecl KiTrap02(VOID);
extern VOID __cdecl KiTrap08(VOID);
extern VOID __cdecl KiTrap13(VOID);
extern VOID __cdecl KiFastCallEntry(VOID);
extern VOID NTAPI ExpInterlockedPopEntrySListFault(VOID);
extern VOID NTAPI ExpInterlockedPopEntrySListResume(VOID);
extern VOID __cdecl CopyParams(VOID);
extern VOID __cdecl ReadBatch(VOID);
extern CHAR KiSystemCallExitBranch[];
extern CHAR KiSystemCallExit[];
extern CHAR KiSystemCallExit2[];

//
// Trap Macros
//
#include "trap_x.h"

//
// Returns a thread's FPU save area
//
FORCEINLINE
PFX_SAVE_AREA
KiGetThreadNpxArea(IN PKTHREAD Thread)
{
    ASSERT((ULONG_PTR)Thread->InitialStack % 16 == 0);
    return (PFX_SAVE_AREA)((ULONG_PTR)Thread->InitialStack - sizeof(FX_SAVE_AREA));
}

//
// Sanitizes a selector
//
FORCEINLINE
ULONG
Ke386SanitizeSeg(IN ULONG Cs,
                 IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and force CPL 0 if so.
    // Otherwise, force CPL 3.
    //
    return ((Mode == KernelMode) ?
            (Cs & (0xFFFF & ~RPL_MASK)) :
            (RPL_MASK | (Cs & 0xFFFF)));
}

//
// Sanitizes EFLAGS
//
FORCEINLINE
ULONG
Ke386SanitizeFlags(IN ULONG Eflags,
                   IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and sanitize EFLAGS if so.
    // Otherwise, also force interrupt mask on.
    //
    return ((Mode == KernelMode) ?
            (Eflags & (EFLAGS_USER_SANITIZE | EFLAGS_INTERRUPT_MASK)) :
            (EFLAGS_INTERRUPT_MASK | (Eflags & EFLAGS_USER_SANITIZE)));
}

//
// Sanitizes a Debug Register
//
FORCEINLINE
PVOID
Ke386SanitizeDr(IN PVOID DrAddress,
                IN KPROCESSOR_MODE Mode)
{
    //
    // Check if we're in kernel-mode, and return the address directly if so.
    // Otherwise, make sure it's not inside the kernel-mode address space.
    // If it is, then clear the address.
    //
    return ((Mode == KernelMode) ? DrAddress :
            (DrAddress <= MM_HIGHEST_USER_ADDRESS) ? DrAddress : 0);
}
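//
// Illustrative only: Ke386SanitizeSeg(0x08, UserMode) yields 0x0B (RPL forced
// to 3), while Ke386SanitizeSeg(0x08, KernelMode) leaves the selector at ring 0.
// Similarly, Ke386SanitizeFlags() always keeps EFLAGS_INTERRUPT_MASK set for
// user-mode callers, so a sanitized user-mode context cannot run with
// interrupts disabled.
//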
//
// Exception with no arguments
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiDispatchException0Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with no arguments */
    KiDispatchExceptionFromTrapFrame(Code, 0, Address, 0, 0, 0, 0, TrapFrame);
}

//
// Exception with one argument
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiDispatchException1Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with one argument */
    KiDispatchExceptionFromTrapFrame(Code, 0, Address, 1, P1, 0, 0, TrapFrame);
}

//
// Exception with two arguments
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiDispatchException2Args(IN NTSTATUS Code,
                         IN ULONG_PTR Address,
                         IN ULONG P1,
                         IN ULONG P2,
                         IN PKTRAP_FRAME TrapFrame)
{
    /* Helper for exceptions with two arguments */
    KiDispatchExceptionFromTrapFrame(Code, 0, Address, 2, P1, P2, 0, TrapFrame);
}

//
// Performs a system call
//

/*
 * This sequence does a RtlCopyMemory(Stack - StackBytes, Arguments, StackBytes)
 * and then calls the function associated with the system call.
 *
 * It's done in assembly for two reasons: we need to muck with the stack,
 * and the call itself restores the stack back for us. The only way to do
 * this in C is to write manual C handlers for every possible number of args on
 * the stack, and then have the handler issue a call by pointer. This is
 * wasteful since it would basically push the values twice and require another
 * level of call indirection.
 *
 * The ARM kernel currently does it that way, but it should probably be changed
 * to work like this implementation instead.
 */
#ifdef __GNUC__
FORCEINLINE
NTSTATUS
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    NTSTATUS Result;

    __asm__ __volatile__
    (
        "subl %1, %%esp\n\t"
        "movl %%esp, %%edi\n\t"
        "movl %2, %%esi\n\t"
        "shrl $2, %1\n\t"
        "rep movsd\n\t"
        "call *%3\n\t"
        "movl %%eax, %0"
        : "=r"(Result)
        : "c"(StackBytes),
          "d"(Arguments),
          "r"(Handler)
        : "%esp", "%esi", "%edi"
    );
    return Result;
}
#elif defined(_MSC_VER)
FORCEINLINE
NTSTATUS
KiSystemCallTrampoline(IN PVOID Handler,
                       IN PVOID Arguments,
                       IN ULONG StackBytes)
{
    __asm
    {
        mov ecx, StackBytes
        mov esi, Arguments
        mov eax, Handler
        sub esp, ecx
        mov edi, esp
        shr ecx, 2
        rep movsd
        call eax
    }
    /* Return with result in EAX */
}
#else
#error Unknown Compiler
#endif

//
// Checks for pending APCs
//
FORCEINLINE
VOID
KiCheckForApcDelivery(IN PKTRAP_FRAME TrapFrame)
{
    PKTHREAD Thread;
    KIRQL OldIrql;

    /* Check for V8086 or user-mode trap */
    if ((TrapFrame->EFlags & EFLAGS_V86_MASK) || (KiUserTrap(TrapFrame)))
    {
        /* Get the thread */
        Thread = KeGetCurrentThread();
        while (TRUE)
        {
            /* Turn off the alerted state for kernel mode */
            Thread->Alerted[KernelMode] = FALSE;

            /* Are there pending user APCs? */
            if (!Thread->ApcState.UserApcPending) break;

            /* Raise to APC level and enable interrupts */
            OldIrql = KfRaiseIrql(APC_LEVEL);
            _enable();

            /* Deliver APCs */
            KiDeliverApc(UserMode, NULL, TrapFrame);

            /* Restore IRQL and disable interrupts once again */
            KfLowerIrql(OldIrql);
            _disable();
        }
    }
}

//
// Switches from boot loader to initial kernel stack
//
INIT_FUNCTION
FORCEINLINE
VOID
KiSwitchToBootStack(IN ULONG_PTR InitialStack)
{
    INIT_FUNCTION VOID NTAPI KiSystemStartupBootStack(VOID);

    /* We have to switch to a new stack before continuing kernel initialization */
#ifdef __GNUC__
    __asm__
    (
        "movl %0, %%esp\n\t"
        "subl %1, %%esp\n\t"
        "pushl %2\n\t"
        "jmp _KiSystemStartupBootStack@0"
        :
        : "c"(InitialStack),
          "i"(NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH),
          "i"(CR0_EM | CR0_TS | CR0_MP),
          "p"(KiSystemStartupBootStack)
        : "%esp"
    );
#elif defined(_MSC_VER)
    __asm
    {
        mov esp, InitialStack
        sub esp, (NPX_FRAME_LENGTH + KTRAP_FRAME_ALIGN + KTRAP_FRAME_LENGTH)
        push (CR0_EM | CR0_TS | CR0_MP)
        jmp KiSystemStartupBootStack
    }
#else
#error Unknown Compiler
#endif
}

//
// Emits the iret instruction for C code
//
FORCEINLINE
DECLSPEC_NORETURN
VOID
KiIret(VOID)
{
#if defined(__GNUC__)
    __asm__ __volatile__
    (
        "iret"
    );
#elif defined(_MSC_VER)
    __asm
    {
        iretd
    }
#else
#error Unsupported compiler
#endif
    UNREACHABLE;
}

//
// Normally this is done by the HAL, but on x86 as an optimization, the kernel
// initiates the end by calling back into the HAL and exiting the trap here.
//
FORCEINLINE
VOID
KiEndInterrupt(IN KIRQL Irql,
               IN PKTRAP_FRAME TrapFrame)
{
    /* Disable interrupts and end the interrupt */
    _disable();
    HalEndSystemInterrupt(Irql, TrapFrame);

    /* Exit the interrupt */
    KiEoiHelper(TrapFrame);
}

//
// PERF Code
//
FORCEINLINE
VOID
Ki386PerfEnd(VOID)
{
    extern ULONGLONG BootCyclesEnd, BootCycles;
    BootCyclesEnd = __rdtsc();
    DbgPrint("Boot took %I64u cycles!\n", BootCyclesEnd - BootCycles);
    DbgPrint("Interrupts: %u System Calls: %u Context Switches: %u\n",
             KeGetCurrentPrcb()->InterruptCount,
             KeGetCurrentPrcb()->KeSystemCalls,
             KeGetContextSwitches(KeGetCurrentPrcb()));
}

FORCEINLINE
PULONG
KiGetUserModeStackAddress(void)
{
    return &(KeGetCurrentThread()->TrapFrame->HardwareEsp);
}

#endif