xref: /reactos/ntoskrnl/ke/amd64/cpu.c (revision 25e2f5f2)
1 /*
2  * PROJECT:         ReactOS Kernel
3  * LICENSE:         GPL - See COPYING in the top level directory
4  * FILE:            ntoskrnl/ke/amd64/cpu.c
5  * PURPOSE:         Routines for CPU-level support
6  * PROGRAMMERS:     Alex Ionescu (alex.ionescu@reactos.org)
7  *                  Timo Kreuzer (timo.kreuzer@reactos.org)
8  */
9 
10 /* INCLUDES *****************************************************************/
11 
12 #include <ntoskrnl.h>
13 #include <x86x64/Cpuid.h>
14 #include <x86x64/Msr.h>
15 #define NDEBUG
16 #include <debug.h>
17 
18 /* GLOBALS *******************************************************************/
19 
/* The Boot TSS (used by the boot processor) */
KTSS64 KiBootTss;

/* CPU Features and Flags.
   NOTE(review): the KeI386* names are kept on amd64 presumably for export
   compatibility with the x86 kernel — confirm against the spec files. */
ULONG KeI386CpuType;
ULONG KeI386CpuStep;
ULONG KeI386MachineType;
ULONG KeI386NpxPresent = 1;      /* Defaults to 1: FPU assumed present */
ULONG KeLargestCacheLine = 0x40; /* Default cache line size: 64 bytes */
ULONG KiDmaIoCoherency = 0;      /* Set via KeSetDmaIoCoherency() below */
BOOLEAN KiSMTProcessorsPresent;  /* Set by KiGetFeatureBits() when HTT > 1 */

/* Flush data: incremented atomically on every full TB flush */
volatile LONG KiTbFlushTimeStamp;

/* CPU Signatures: vendor strings as returned by CPUID leaf 0 */
static const CHAR CmpIntelID[]       = "GenuineIntel";
static const CHAR CmpAmdID[]         = "AuthenticAMD";
static const CHAR CmpCentaurID[]     = "CentaurHauls";
39 
/* Decoded layout of the processor signature returned in CPUID.01H:EAX */
typedef union _CPU_SIGNATURE
{
    struct
    {
        ULONG Step : 4;           /* Stepping ID (bits 0-3) */
        ULONG Model : 4;          /* Model (bits 4-7) */
        ULONG Family : 4;         /* Family ID (bits 8-11) */
        ULONG Unused : 4;         /* Bits 12-15: not used here */
        ULONG ExtendedModel : 4;  /* Extended Model ID (bits 16-19) */
        ULONG ExtendedFamily : 8; /* Extended Family ID (bits 20-27) */
        ULONG Unused2 : 4;        /* Bits 28-31: reserved */
    };
    ULONG AsULONG;                /* Raw EAX value */
} CPU_SIGNATURE;
54 
55 /* FUNCTIONS *****************************************************************/
56 
57 ULONG
58 NTAPI
59 KiGetCpuVendor(VOID)
60 {
61     PKPRCB Prcb = KeGetCurrentPrcb();
62     CPU_INFO CpuInfo;
63 
64     /* Get the Vendor ID and null-terminate it */
65     KiCpuId(&CpuInfo, 0);
66 
67     /* Copy it to the PRCB and null-terminate it */
68     *(ULONG*)&Prcb->VendorString[0] = CpuInfo.Ebx;
69     *(ULONG*)&Prcb->VendorString[4] = CpuInfo.Edx;
70     *(ULONG*)&Prcb->VendorString[8] = CpuInfo.Ecx;
71     Prcb->VendorString[12] = 0;
72 
73     /* Now check the CPU Type */
74     if (!strcmp((PCHAR)Prcb->VendorString, CmpIntelID))
75     {
76         Prcb->CpuVendor = CPU_INTEL;
77     }
78     else if (!strcmp((PCHAR)Prcb->VendorString, CmpAmdID))
79     {
80         Prcb->CpuVendor = CPU_AMD;
81     }
82     else if (!strcmp((PCHAR)Prcb->VendorString, CmpCentaurID))
83     {
84         DPRINT1("VIA CPUs not fully supported\n");
85         Prcb->CpuVendor = CPU_VIA;
86     }
87     else
88     {
89         /* Invalid CPU */
90         DPRINT1("%s CPU support not fully tested!\n", Prcb->VendorString);
91         Prcb->CpuVendor = CPU_UNKNOWN;
92     }
93 
94     return Prcb->CpuVendor;
95 }
96 
/**
 * Decodes the CPUID.01H:EAX processor signature and stores the resulting
 * CpuType (family) and CpuStep (model/stepping) in the current PRCB.
 * Also initializes Prcb->CpuVendor via KiGetCpuVendor().
 */
VOID
NTAPI
KiSetProcessorType(VOID)
{
    CPU_INFO CpuInfo;
    CPU_SIGNATURE CpuSignature;
    BOOLEAN ExtendModel;
    ULONG Stepping, Type, Vendor;

    /* This initializes Prcb->CpuVendor */
    Vendor = KiGetCpuVendor();

    /* Do CPUID 1 now */
    KiCpuId(&CpuInfo, 1);

    /*
     * Get the Stepping and Type. The stepping contains both the
     * Model and the Step, while the Type contains the returned Family.
     *
     * For the stepping, we convert this: zzzzzzxy into this: x0y
     */
    CpuSignature.AsULONG = CpuInfo.Eax;
    Stepping = CpuSignature.Model;
    /* The extended model bits are only meaningful for family 15, and (on
     * newer NT versions) for family 6 on Intel/Centaur parts */
    ExtendModel = (CpuSignature.Family == 15);
#if ( (NTDDI_VERSION >= NTDDI_WINXPSP2) && (NTDDI_VERSION < NTDDI_WS03) ) || (NTDDI_VERSION >= NTDDI_WS03SP1)
    if (CpuSignature.Family == 6)
    {
        ExtendModel |= (Vendor == CPU_INTEL);
#if (NTDDI_VERSION >= NTDDI_WIN8)
        ExtendModel |= (Vendor == CPU_CENTAUR);
#endif
    }
#endif
    if (ExtendModel)
    {
        /* Add ExtendedModel to distinguish from non-extended values. */
        Stepping |= (CpuSignature.ExtendedModel << 4);
    }
    /* Pack model into the high byte and step into the low byte */
    Stepping = (Stepping << 8) | CpuSignature.Step;
    Type = CpuSignature.Family;
    if (CpuSignature.Family == 15)
    {
        /* Add ExtendedFamily to distinguish from non-extended values.
         * It must not be larger than 0xF0 to avoid overflow. */
        Type += min(CpuSignature.ExtendedFamily, 0xF0);
    }

    /* Save them in the PRCB */
    KeGetCurrentPrcb()->CpuID = TRUE;
    KeGetCurrentPrcb()->CpuType = (UCHAR)Type;
    KeGetCurrentPrcb()->CpuStep = (USHORT)Stepping;
}
149 
150 /*!
151     \brief Evaluates the KeFeatureFlag bits for the current CPU.
152 
153     \return The feature flags for this CPU.
154 
155     \see https://www.geoffchappell.com/studies/windows/km/ntoskrnl/structs/kprcb/featurebits.htm
156 
157     \todo
158      - KF_VIRT_FIRMWARE_ENABLED 0x08000000 (see notes from Geoff Chappell)
159      - KF_FPU_LEAKAGE 0x0000020000000000ULL
160      - KF_CAT 0x0000100000000000ULL
161      - KF_CET_SS 0x0000400000000000ULL
162 */
ULONG64
NTAPI
KiGetFeatureBits(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    ULONG Vendor;
    ULONG64 FeatureBits = 0;
    CPUID_SIGNATURE_REGS signature;
    CPUID_VERSION_INFO_REGS VersionInfo;
    CPUID_EXTENDED_FUNCTION_REGS extendedFunction;

    /* Get the Vendor ID (set up earlier by KiGetCpuVendor) */
    Vendor = Prcb->CpuVendor;

    /* Make sure we got a valid vendor ID at least. */
    if (Vendor == CPU_UNKNOWN) return FeatureBits;

    /* Get signature CPUID for the maximum function (leaf 0) */
    __cpuid(signature.AsInt32, CPUID_SIGNATURE);

    /* Get the CPUID Info (leaf 1: version/feature information) */
    __cpuid(VersionInfo.AsInt32, CPUID_VERSION_INFO);

    /* Set the initial APIC ID */
    Prcb->InitialApicId = (UCHAR)VersionInfo.Ebx.Bits.InitialLocalApicId;

    /* Convert all CPUID Feature bits into our KF_* format */
    if (VersionInfo.Edx.Bits.VME) FeatureBits |= KF_CR4;
    if (VersionInfo.Edx.Bits.PSE) FeatureBits |= KF_LARGE_PAGE | KF_CR4;
    if (VersionInfo.Edx.Bits.TSC) FeatureBits |= KF_RDTSC;
    if (VersionInfo.Edx.Bits.CX8) FeatureBits |= KF_CMPXCHG8B;
    if (VersionInfo.Edx.Bits.SEP) FeatureBits |= KF_FAST_SYSCALL;
    if (VersionInfo.Edx.Bits.MTRR) FeatureBits |= KF_MTRR;
    if (VersionInfo.Edx.Bits.PGE) FeatureBits |= KF_GLOBAL_PAGE | KF_CR4;
    if (VersionInfo.Edx.Bits.CMOV) FeatureBits |= KF_CMOV;
    if (VersionInfo.Edx.Bits.PAT) FeatureBits |= KF_PAT;
    if (VersionInfo.Edx.Bits.DS) FeatureBits |= KF_DTS;
    if (VersionInfo.Edx.Bits.MMX) FeatureBits |= KF_MMX;
    if (VersionInfo.Edx.Bits.FXSR) FeatureBits |= KF_FXSR;
    if (VersionInfo.Edx.Bits.SSE) FeatureBits |= KF_XMMI;
    if (VersionInfo.Edx.Bits.SSE2) FeatureBits |= KF_XMMI64;

    if (VersionInfo.Ecx.Bits.SSE3) FeatureBits |= KF_SSE3;
    if (VersionInfo.Ecx.Bits.SSSE3) FeatureBits |= KF_SSSE3;
    if (VersionInfo.Ecx.Bits.CMPXCHG16B) FeatureBits |= KF_CMPXCHG16B;
    if (VersionInfo.Ecx.Bits.SSE4_1) FeatureBits |= KF_SSE4_1;
    if (VersionInfo.Ecx.Bits.XSAVE) FeatureBits |= KF_XSTATE;
    if (VersionInfo.Ecx.Bits.RDRAND) FeatureBits |= KF_RDRAND;

    /* Check if the CPU has hyper-threading */
    if (VersionInfo.Edx.Bits.HTT)
    {
        /* Set the number of logical CPUs */
        Prcb->LogicalProcessorsPerPhysicalProcessor =
            VersionInfo.Ebx.Bits.MaximumAddressableIdsForLogicalProcessors;
        if (Prcb->LogicalProcessorsPerPhysicalProcessor > 1)
        {
            /* We're on dual-core */
            KiSMTProcessorsPresent = TRUE;
        }
    }
    else
    {
        /* We only have a single CPU */
        Prcb->LogicalProcessorsPerPhysicalProcessor = 1;
    }

    /* Check if CPUID_THERMAL_POWER_MANAGEMENT (0x06) is supported */
    if (signature.MaxLeaf >= CPUID_THERMAL_POWER_MANAGEMENT)
    {
        /* Read CPUID_THERMAL_POWER_MANAGEMENT */
        CPUID_THERMAL_POWER_MANAGEMENT_REGS PowerInfo;
        __cpuid(PowerInfo.AsInt32, CPUID_THERMAL_POWER_MANAGEMENT);

        if (PowerInfo.Undoc.Ecx.ACNT2) FeatureBits |= KF_ACNT2;
    }

    /* Check if CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS (0x07) is supported */
    if (signature.MaxLeaf >= CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS)
    {
        /* Read CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS */
        CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_REGS ExtFlags;
        __cpuidex(ExtFlags.AsInt32,
            CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS,
            CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS_SUB_LEAF_INFO);

        if (ExtFlags.Ebx.Bits.SMEP) FeatureBits |= KF_SMEP;
        if (ExtFlags.Ebx.Bits.FSGSBASE) FeatureBits |= KF_RDWRFSGSBASE;
        if (ExtFlags.Ebx.Bits.SMAP) FeatureBits |= KF_SMAP;
    }

    /* Check if CPUID_EXTENDED_STATE (0x0D) is supported */
    if (signature.MaxLeaf >= CPUID_EXTENDED_STATE)
    {
        /* Read CPUID_EXTENDED_STATE */
        CPUID_EXTENDED_STATE_SUB_LEAF_EAX_REGS ExtStateSub;
        __cpuidex(ExtStateSub.AsInt32,
            CPUID_EXTENDED_STATE,
            CPUID_EXTENDED_STATE_SUB_LEAF);

        if (ExtStateSub.Eax.Bits.XSAVEOPT) FeatureBits |= KF_XSAVEOPT;
        if (ExtStateSub.Eax.Bits.XSAVES)  FeatureBits |= KF_XSAVES;
    }

    /* Check extended cpuid features. The extended MaxLeaf is only valid
     * when it comes back in the 0x800000xx range. */
    __cpuid(extendedFunction.AsInt32, CPUID_EXTENDED_FUNCTION);
    if ((extendedFunction.MaxLeaf & 0xffffff00) == 0x80000000)
    {
        /* Check if CPUID_EXTENDED_CPU_SIG (0x80000001) is supported */
        if (extendedFunction.MaxLeaf >= CPUID_EXTENDED_CPU_SIG)
        {
            /* Read CPUID_EXTENDED_CPU_SIG */
            CPUID_EXTENDED_CPU_SIG_REGS ExtSig;
            __cpuid(ExtSig.AsInt32, CPUID_EXTENDED_CPU_SIG);

            /* Check if NX-bit is supported */
            if (ExtSig.Intel.Edx.Bits.NX) FeatureBits |= KF_NX_BIT;
            if (ExtSig.Intel.Edx.Bits.Page1GB) FeatureBits |= KF_HUGEPAGE;
            if (ExtSig.Intel.Edx.Bits.RDTSCP) FeatureBits |= KF_RDTSCP;

            /* AMD specific */
            if (Vendor == CPU_AMD)
            {
                if (ExtSig.Amd.Edx.Bits.ThreeDNow) FeatureBits |= KF_3DNOW;
            }
        }
    }

    /* Vendor specific */
    if (Vendor == CPU_INTEL)
    {
        FeatureBits |= KF_GENUINE_INTEL;

        /* Check for models that support LBR (last branch record) */
        if (VersionInfo.Eax.Bits.FamilyId == 6)
        {
            if ((VersionInfo.Eax.Bits.Model == 15) ||
                (VersionInfo.Eax.Bits.Model == 22) ||
                (VersionInfo.Eax.Bits.Model == 23) ||
                (VersionInfo.Eax.Bits.Model == 26))
            {
                FeatureBits |= KF_BRANCH;
            }
        }

        /* Check if VMX is available */
        if (VersionInfo.Ecx.Bits.VMX)
        {
            /* Read PROCBASED ctls and check if secondary are allowed */
            MSR_IA32_VMX_PROCBASED_CTLS_REGISTER ProcBasedCtls;
            ProcBasedCtls.Uint64 = __readmsr(MSR_IA32_VMX_PROCBASED_CTLS);
            if (ProcBasedCtls.Bits.Allowed1.ActivateSecondaryControls)
            {
                /* Read secondary controls and check if EPT is allowed */
                MSR_IA32_VMX_PROCBASED_CTLS2_REGISTER ProcBasedCtls2;
                ProcBasedCtls2.Uint64 = __readmsr(MSR_IA32_VMX_PROCBASED_CTLS2);
                if (ProcBasedCtls2.Bits.Allowed1.EPT)
                    FeatureBits |= KF_SLAT;
            }
        }
    }
    else if (Vendor == CPU_AMD)
    {
        FeatureBits |= KF_AUTHENTICAMD;
        FeatureBits |= KF_BRANCH;

        /* Check extended cpuid features */
        if ((extendedFunction.MaxLeaf & 0xffffff00) == 0x80000000)
        {
            /* Check if CPUID_AMD_SVM_FEATURES (0x8000000A) is supported */
            if (extendedFunction.MaxLeaf >= CPUID_AMD_SVM_FEATURES)
            {
                /* Read CPUID_AMD_SVM_FEATURES and check if Nested Paging is available */
                CPUID_AMD_SVM_FEATURES_REGS SvmFeatures;
                __cpuid(SvmFeatures.AsInt32, CPUID_AMD_SVM_FEATURES);
                if (SvmFeatures.Edx.Bits.NP) FeatureBits |= KF_SLAT;
            }
        }
    }

    /* Return the Feature Bits */
    return FeatureBits;
}
346 
#if DBG
/**
 * Debug-only: prints the names of all KF_* feature bits present in
 * Prcb->FeatureBits, plus a couple of raw CPUID leaf-1 EDX flags.
 *
 * @param Prcb The PRCB whose FeatureBits should be reported.
 */
VOID
KiReportCpuFeatures(IN PKPRCB Prcb)
{
    ULONG CpuFeatures = 0;
    CPU_INFO CpuInfo;

    /* Only query CPUID if the vendor is known (non-zero) */
    if (Prcb->CpuVendor)
    {
        KiCpuId(&CpuInfo, 1);
        CpuFeatures = CpuInfo.Edx;
    }

    DPRINT1("Supported CPU features: ");

/* Print the macro's name whenever the corresponding bit is set */
#define print_kf_bit(kf_value) if (Prcb->FeatureBits & kf_value) DbgPrint(#kf_value " ")
    print_kf_bit(KF_SMEP);
    print_kf_bit(KF_RDTSC);
    print_kf_bit(KF_CR4);
    print_kf_bit(KF_CMOV);
    print_kf_bit(KF_GLOBAL_PAGE);
    print_kf_bit(KF_LARGE_PAGE);
    print_kf_bit(KF_MTRR);
    print_kf_bit(KF_CMPXCHG8B);
    print_kf_bit(KF_MMX);
    print_kf_bit(KF_DTS);
    print_kf_bit(KF_PAT);
    print_kf_bit(KF_FXSR);
    print_kf_bit(KF_FAST_SYSCALL);
    print_kf_bit(KF_XMMI);
    print_kf_bit(KF_3DNOW);
    print_kf_bit(KF_XSAVEOPT);
    print_kf_bit(KF_XMMI64);
    print_kf_bit(KF_BRANCH);
    print_kf_bit(KF_00040000);
    print_kf_bit(KF_SSE3);
    print_kf_bit(KF_CMPXCHG16B);
    print_kf_bit(KF_AUTHENTICAMD);
    print_kf_bit(KF_ACNT2);
    print_kf_bit(KF_XSTATE);
    print_kf_bit(KF_GENUINE_INTEL);
    print_kf_bit(KF_SLAT);
    print_kf_bit(KF_VIRT_FIRMWARE_ENABLED);
    print_kf_bit(KF_RDWRFSGSBASE);
    print_kf_bit(KF_NX_BIT);
    print_kf_bit(KF_NX_DISABLED);
    print_kf_bit(KF_NX_ENABLED);
    print_kf_bit(KF_RDRAND);
    print_kf_bit(KF_SMAP);
    print_kf_bit(KF_RDTSCP);
    print_kf_bit(KF_HUGEPAGE);
    print_kf_bit(KF_XSAVES);
    print_kf_bit(KF_FPU_LEAKAGE);
    print_kf_bit(KF_CAT);
    print_kf_bit(KF_CET_SS);
    print_kf_bit(KF_SSSE3);
    print_kf_bit(KF_SSE4_1);
    print_kf_bit(KF_SSE4_2);
#undef print_kf_bit

/* Same idea, but against the raw CPUID leaf-1 EDX feature word */
#define print_cf(cpu_flag) if (CpuFeatures & cpu_flag) DbgPrint(#cpu_flag " ")
    print_cf(X86_FEATURE_PAE);
    print_cf(X86_FEATURE_HT);
#undef print_cf

    DbgPrint("\n");
}
#endif // DBG
415 
416 VOID
417 NTAPI
418 KiGetCacheInformation(VOID)
419 {
420     PKIPCR Pcr = (PKIPCR)KeGetPcr();
421     ULONG Vendor;
422     ULONG CacheRequests = 0, i;
423     ULONG CurrentRegister;
424     UCHAR RegisterByte;
425     BOOLEAN FirstPass = TRUE;
426     CPU_INFO CpuInfo;
427 
428     /* Set default L2 size */
429     Pcr->SecondLevelCacheSize = 0;
430 
431     /* Get the Vendor ID and make sure we support CPUID */
432     Vendor = KiGetCpuVendor();
433     if (!Vendor) return;
434 
435     /* Check the Vendor ID */
436     switch (Vendor)
437     {
438         /* Handle Intel case */
439         case CPU_INTEL:
440 
441             /*Check if we support CPUID 2 */
442             KiCpuId(&CpuInfo, 0);
443             if (CpuInfo.Eax >= 2)
444             {
445                 /* We need to loop for the number of times CPUID will tell us to */
446                 do
447                 {
448                     /* Do the CPUID call */
449                     KiCpuId(&CpuInfo, 2);
450 
451                     /* Check if it was the first call */
452                     if (FirstPass)
453                     {
454                         /*
455                          * The number of times to loop is the first byte. Read
456                          * it and then destroy it so we don't get confused.
457                          */
458                         CacheRequests = CpuInfo.Eax & 0xFF;
459                         CpuInfo.Eax &= 0xFFFFFF00;
460 
461                         /* Don't go over this again */
462                         FirstPass = FALSE;
463                     }
464 
465                     /* Loop all 4 registers */
466                     for (i = 0; i < 4; i++)
467                     {
468                         /* Get the current register */
469                         CurrentRegister = CpuInfo.AsUINT32[i];
470 
471                         /*
472                          * If the upper bit is set, then this register should
473                          * be skipped.
474                          */
475                         if (CurrentRegister & 0x80000000) continue;
476 
477                         /* Keep looping for every byte inside this register */
478                         while (CurrentRegister)
479                         {
480                             /* Read a byte, skip a byte. */
481                             RegisterByte = (UCHAR)(CurrentRegister & 0xFF);
482                             CurrentRegister >>= 8;
483                             if (!RegisterByte) continue;
484 
485                             /*
486                              * Valid values are from 0x40 (0 bytes) to 0x49
487                              * (32MB), or from 0x80 to 0x89 (same size but
488                              * 8-way associative.
489                              */
490                             if (((RegisterByte > 0x40) &&
491                                  (RegisterByte <= 0x49)) ||
492                                 ((RegisterByte > 0x80) &&
493                                 (RegisterByte <= 0x89)))
494                             {
495                                 /* Mask out only the first nibble */
496                                 RegisterByte &= 0x0F;
497 
498                                 /* Set the L2 Cache Size */
499                                 Pcr->SecondLevelCacheSize = 0x10000 <<
500                                                             RegisterByte;
501                             }
502                         }
503                     }
504                 } while (--CacheRequests);
505             }
506             break;
507 
508         case CPU_AMD:
509 
510             /* Check if we support CPUID 0x80000006 */
511             KiCpuId(&CpuInfo, 0x80000000);
512             if (CpuInfo.Eax >= 6)
513             {
514                 /* Get 2nd level cache and tlb size */
515                 KiCpuId(&CpuInfo, 0x80000006);
516 
517                 /* Set the L2 Cache Size */
518                 Pcr->SecondLevelCacheSize = (CpuInfo.Ecx & 0xFFFF0000) >> 6;
519             }
520             break;
521     }
522 }
523 
524 VOID
525 NTAPI
526 KeFlushCurrentTb(VOID)
527 {
528     /* Flush the TLB by resetting CR3 */
529     __writecr3(__readcr3());
530 }
531 
/**
 * Restores the special (control) registers of the current processor from a
 * previously captured KPROCESSOR_STATE. Counterpart of
 * KiSaveProcessorControlState.
 *
 * @param ProcessorState The saved state to restore from.
 */
VOID
NTAPI
KiRestoreProcessorControlState(PKPROCESSOR_STATE ProcessorState)
{
    /* Restore the CR registers (CR2 restore is intentionally disabled) */
    __writecr0(ProcessorState->SpecialRegisters.Cr0);
//    __writecr2(ProcessorState->SpecialRegisters.Cr2);
    __writecr3(ProcessorState->SpecialRegisters.Cr3);
    __writecr4(ProcessorState->SpecialRegisters.Cr4);
    __writecr8(ProcessorState->SpecialRegisters.Cr8);

    /* Restore the DR registers (debug registers 0-3, 6, 7) */
    __writedr(0, ProcessorState->SpecialRegisters.KernelDr0);
    __writedr(1, ProcessorState->SpecialRegisters.KernelDr1);
    __writedr(2, ProcessorState->SpecialRegisters.KernelDr2);
    __writedr(3, ProcessorState->SpecialRegisters.KernelDr3);
    __writedr(6, ProcessorState->SpecialRegisters.KernelDr6);
    __writedr(7, ProcessorState->SpecialRegisters.KernelDr7);

    /* Restore GDT, IDT, LDT and TSS.
       NOTE(review): LDTR/TR reloads are disabled here — presumably because
       reloading TR requires clearing the busy bit in the GDT first; confirm. */
    __lgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
//    __lldt(&ProcessorState->SpecialRegisters.Ldtr);
//    __ltr(&ProcessorState->SpecialRegisters.Tr);
    __lidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

    /* Restore the SSE control/status register */
    _mm_setcsr(ProcessorState->SpecialRegisters.MxCsr);
//    ProcessorState->SpecialRegisters.DebugControl
//    ProcessorState->SpecialRegisters.LastBranchToRip
//    ProcessorState->SpecialRegisters.LastBranchFromRip
//    ProcessorState->SpecialRegisters.LastExceptionToRip
//    ProcessorState->SpecialRegisters.LastExceptionFromRip

    /* Restore MSRs (GS bases and SYSCALL configuration) */
    __writemsr(X86_MSR_GSBASE, ProcessorState->SpecialRegisters.MsrGsBase);
    __writemsr(X86_MSR_KERNEL_GSBASE, ProcessorState->SpecialRegisters.MsrGsSwap);
    __writemsr(X86_MSR_STAR, ProcessorState->SpecialRegisters.MsrStar);
    __writemsr(X86_MSR_LSTAR, ProcessorState->SpecialRegisters.MsrLStar);
    __writemsr(X86_MSR_CSTAR, ProcessorState->SpecialRegisters.MsrCStar);
    __writemsr(X86_MSR_SFMASK, ProcessorState->SpecialRegisters.MsrSyscallMask);

}
573 
/**
 * Captures the special (control) registers of the current processor into
 * the given KPROCESSOR_STATE. Counterpart of KiRestoreProcessorControlState.
 *
 * @param ProcessorState Receives the captured register state.
 */
VOID
NTAPI
KiSaveProcessorControlState(OUT PKPROCESSOR_STATE ProcessorState)
{
    /* Save the CR registers */
    ProcessorState->SpecialRegisters.Cr0 = __readcr0();
    ProcessorState->SpecialRegisters.Cr2 = __readcr2();
    ProcessorState->SpecialRegisters.Cr3 = __readcr3();
    ProcessorState->SpecialRegisters.Cr4 = __readcr4();
    ProcessorState->SpecialRegisters.Cr8 = __readcr8();

    /* Save the DR registers (debug registers 0-3, 6, 7) */
    ProcessorState->SpecialRegisters.KernelDr0 = __readdr(0);
    ProcessorState->SpecialRegisters.KernelDr1 = __readdr(1);
    ProcessorState->SpecialRegisters.KernelDr2 = __readdr(2);
    ProcessorState->SpecialRegisters.KernelDr3 = __readdr(3);
    ProcessorState->SpecialRegisters.KernelDr6 = __readdr(6);
    ProcessorState->SpecialRegisters.KernelDr7 = __readdr(7);

    /* Save GDT, IDT, LDT and TSS */
    __sgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
    __sldt(&ProcessorState->SpecialRegisters.Ldtr);
    __str(&ProcessorState->SpecialRegisters.Tr);
    __sidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

    /* Save the SSE control/status register.
       The LBR/debug-control fields below are not captured yet. */
    ProcessorState->SpecialRegisters.MxCsr = _mm_getcsr();
//    ProcessorState->SpecialRegisters.DebugControl =
//    ProcessorState->SpecialRegisters.LastBranchToRip =
//    ProcessorState->SpecialRegisters.LastBranchFromRip =
//    ProcessorState->SpecialRegisters.LastExceptionToRip =
//    ProcessorState->SpecialRegisters.LastExceptionFromRip =

    /* Save MSRs (GS bases and SYSCALL configuration) */
    ProcessorState->SpecialRegisters.MsrGsBase = __readmsr(X86_MSR_GSBASE);
    ProcessorState->SpecialRegisters.MsrGsSwap = __readmsr(X86_MSR_KERNEL_GSBASE);
    ProcessorState->SpecialRegisters.MsrStar = __readmsr(X86_MSR_STAR);
    ProcessorState->SpecialRegisters.MsrLStar = __readmsr(X86_MSR_LSTAR);
    ProcessorState->SpecialRegisters.MsrCStar = __readmsr(X86_MSR_CSTAR);
    ProcessorState->SpecialRegisters.MsrSyscallMask = __readmsr(X86_MSR_SFMASK);
}
614 
615 VOID
616 NTAPI
617 KiSaveProcessorState(
618     _In_ PKTRAP_FRAME TrapFrame,
619     _In_ PKEXCEPTION_FRAME ExceptionFrame)
620 {
621     PKPRCB Prcb = KeGetCurrentPrcb();
622 
623     /* Save all context */
624     Prcb->ProcessorState.ContextFrame.ContextFlags = CONTEXT_ALL;
625     KeTrapFrameToContext(TrapFrame, ExceptionFrame, &Prcb->ProcessorState.ContextFrame);
626 
627     /* Save control registers */
628     KiSaveProcessorControlState(&Prcb->ProcessorState);
629 }
630 
631 VOID
632 NTAPI
633 KiRestoreProcessorState(
634     _Out_ PKTRAP_FRAME TrapFrame,
635     _Out_ PKEXCEPTION_FRAME ExceptionFrame)
636 {
637     PKPRCB Prcb = KeGetCurrentPrcb();
638 
639     /* Restore all context */
640     KeContextToTrapFrame(&Prcb->ProcessorState.ContextFrame,
641                          ExceptionFrame,
642                          TrapFrame,
643                          CONTEXT_ALL,
644                          TrapFrame->PreviousMode);
645 
646     /* Restore control registers */
647     KiRestoreProcessorControlState(&Prcb->ProcessorState);
648 }
649 
650 VOID
651 NTAPI
652 KeFlushEntireTb(IN BOOLEAN Invalid,
653                 IN BOOLEAN AllProcessors)
654 {
655     KIRQL OldIrql;
656 
657     // FIXME: halfplemented
658     /* Raise the IRQL for the TB Flush */
659     OldIrql = KeRaiseIrqlToSynchLevel();
660 
661     /* Flush the TB for the Current CPU, and update the flush stamp */
662     KeFlushCurrentTb();
663 
664     /* Update the flush stamp and return to original IRQL */
665     InterlockedExchangeAdd(&KiTbFlushTimeStamp, 1);
666     KeLowerIrql(OldIrql);
667 
668 }
669 
670 KAFFINITY
671 NTAPI
672 KeQueryActiveProcessors(VOID)
673 {
674     PAGED_CODE();
675 
676     /* Simply return the number of active processors */
677     return KeActiveProcessors;
678 }
679 
/* No-op stub on amd64: always succeeds without touching FloatingState.
   NOTE(review): presumably the FPU/XMM state needs no explicit save here
   because it is part of the regular context on amd64 — confirm. */
NTSTATUS
NTAPI
KxSaveFloatingPointState(OUT PKFLOATING_SAVE FloatingState)
{
    UNREFERENCED_PARAMETER(FloatingState);
    return STATUS_SUCCESS;
}
687 
/* No-op stub on amd64: always succeeds without reading FloatingState.
   Counterpart of the KxSaveFloatingPointState stub above. */
NTSTATUS
NTAPI
KxRestoreFloatingPointState(IN PKFLOATING_SAVE FloatingState)
{
    UNREFERENCED_PARAMETER(FloatingState);
    return STATUS_SUCCESS;
}
695 
/* Writes back and invalidates all caches of the current processor via
   WBINVD. Always reports success. */
BOOLEAN
NTAPI
KeInvalidateAllCaches(VOID)
{
    /* Invalidate all caches */
    __wbinvd();
    return TRUE;
}
704 
705 /*
706  * @implemented
707  */
/* Returns the recommended alignment (in bytes) for data shared between
   processors, i.e. the largest cache line size (default 0x40 = 64). */
ULONG
NTAPI
KeGetRecommendedSharedDataAlignment(VOID)
{
    /* Return the global variable */
    return KeLargestCacheLine;
}
715 
716 /*
717  * @implemented
718  */
/* Captures the full processor state (thread context followed by the
   special/control registers) into State, for use by hibernation. */
VOID
__cdecl
KeSaveStateForHibernate(IN PKPROCESSOR_STATE State)
{
    /* Capture the context */
    RtlCaptureContext(&State->ContextFrame);

    /* Capture the control state */
    KiSaveProcessorControlState(State);
}
729 
730 /*
731  * @implemented
732  */
/* Records the platform's DMA I/O coherency setting in the global
   KiDmaIoCoherency variable. */
VOID
NTAPI
KeSetDmaIoCoherency(IN ULONG Coherency)
{
    /* Save the coherency globally */
    KiDmaIoCoherency = Coherency;
}
740