xref: /reactos/ntoskrnl/ke/amd64/cpu.c (revision 9393fc32)
1 /*
2  * PROJECT:         ReactOS Kernel
3  * LICENSE:         GPL - See COPYING in the top level directory
4  * FILE:            ntoskrnl/ke/amd64/cpu.c
5  * PURPOSE:         Routines for CPU-level support
6  * PROGRAMMERS:     Alex Ionescu (alex.ionescu@reactos.org)
7  *                  Timo Kreuzer (timo.kreuzer@reactos.org)
8  */
9 
10 /* INCLUDES *****************************************************************/
11 
12 #include <ntoskrnl.h>
13 #define NDEBUG
14 #include <debug.h>
15 
16 /* GLOBALS *******************************************************************/
17 
/* The Boot TSS */
KTSS64 KiBootTss;

/* CPU Features and Flags (i386-named globals kept for source compatibility) */
ULONG KeI386CpuType;                /* CPU family; not written in this file */
ULONG KeI386CpuStep;                /* CPU stepping; not written in this file */
ULONG KeI386MachineType;
ULONG KeI386NpxPresent = 1;         /* non-zero: x87 FPU assumed present */
ULONG KeLargestCacheLine = 0x40;    /* default cache line size (64 bytes) */
ULONG KiDmaIoCoherency = 0;         /* updated via KeSetDmaIoCoherency() */
BOOLEAN KiSMTProcessorsPresent;     /* set by KiGetFeatureBits() when CPUID
                                       reports >1 logical CPU per package */

/* Flush data: bumped on every full TLB flush (KeFlushEntireTb) */
volatile LONG KiTbFlushTimeStamp;

/* CPU Signatures: 12-byte vendor strings returned by CPUID leaf 0 */
static const CHAR CmpIntelID[]       = "GenuineIntel";
static const CHAR CmpAmdID[]         = "AuthenticAMD";
static const CHAR CmpCyrixID[]       = "CyrixInstead";
static const CHAR CmpTransmetaID[]   = "GenuineTMx86";
static const CHAR CmpCentaurID[]     = "CentaurHauls";
static const CHAR CmpRiseID[]        = "RiseRiseRise";
40 
41 /* FUNCTIONS *****************************************************************/
42 
43 VOID
44 NTAPI
45 KiSetProcessorType(VOID)
46 {
47     CPU_INFO CpuInfo;
48     ULONG Stepping, Type;
49 
50     /* Do CPUID 1 now */
51     KiCpuId(&CpuInfo, 1);
52 
53     /*
54      * Get the Stepping and Type. The stepping contains both the
55      * Model and the Step, while the Type contains the returned Type.
56      * We ignore the family.
57      *
58      * For the stepping, we convert this: zzzzzzxy into this: x0y
59      */
60     Stepping = CpuInfo.Eax & 0xF0;
61     Stepping <<= 4;
62     Stepping += (CpuInfo.Eax & 0xFF);
63     Stepping &= 0xF0F;
64     Type = CpuInfo.Eax & 0xF00;
65     Type >>= 8;
66 
67     /* Save them in the PRCB */
68     KeGetCurrentPrcb()->CpuID = TRUE;
69     KeGetCurrentPrcb()->CpuType = (UCHAR)Type;
70     KeGetCurrentPrcb()->CpuStep = (USHORT)Stepping;
71 }
72 
73 ULONG
74 NTAPI
75 KiGetCpuVendor(VOID)
76 {
77     PKPRCB Prcb = KeGetCurrentPrcb();
78     CPU_INFO CpuInfo;
79 
80     /* Get the Vendor ID and null-terminate it */
81     KiCpuId(&CpuInfo, 0);
82 
83     /* Copy it to the PRCB and null-terminate it */
84     *(ULONG*)&Prcb->VendorString[0] = CpuInfo.Ebx;
85     *(ULONG*)&Prcb->VendorString[4] = CpuInfo.Edx;
86     *(ULONG*)&Prcb->VendorString[8] = CpuInfo.Ecx;
87     Prcb->VendorString[12] = 0;
88 
89     /* Now check the CPU Type */
90     if (!strcmp((PCHAR)Prcb->VendorString, CmpIntelID))
91     {
92         return CPU_INTEL;
93     }
94     else if (!strcmp((PCHAR)Prcb->VendorString, CmpAmdID))
95     {
96         return CPU_AMD;
97     }
98     else if (!strcmp((PCHAR)Prcb->VendorString, CmpCentaurID))
99     {
100         DPRINT1("VIA CPUs not fully supported\n");
101         return CPU_VIA;
102     }
103     else if (!strcmp((PCHAR)Prcb->VendorString, CmpRiseID))
104     {
105         DPRINT1("Rise CPUs not fully supported\n");
106         return 0;
107     }
108 
109     /* Invalid CPU */
110     return CPU_UNKNOWN;
111 }
112 
/**
 * Computes the kernel feature bit mask (KF_*) for the current processor.
 *
 * Translates the raw CPUID feature flags from leaf 1 (and, when reported,
 * extended leaf 0x80000001) into the kernel's KF_* representation.
 *
 * Side effects on the current PRCB: sets InitialApicId and
 * LogicalProcessorsPerPhysicalProcessor; may set the global
 * KiSMTProcessorsPresent.
 *
 * @return The accumulated KF_* mask. At minimum KF_WORKING_PTE, which is
 *         also all that is returned when the CPU vendor is unrecognized.
 */
ULONG
NTAPI
KiGetFeatureBits(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    ULONG Vendor;
    ULONG FeatureBits = KF_WORKING_PTE;
    CPU_INFO CpuInfo;

    /* Get the Vendor ID */
    Vendor = KiGetCpuVendor();

    /* Bail out with the baseline bits if the vendor is unknown */
    if (!Vendor) return FeatureBits;

    /* Get the standard feature leaf (CPUID 1) */
    KiCpuId(&CpuInfo, 1);

    /* Set the initial APIC ID (EBX bits 31:24) */
    Prcb->InitialApicId = (UCHAR)(CpuInfo.Ebx >> 24);

    /* Convert the EDX feature bits into our format */
    if (CpuInfo.Edx & X86_FEATURE_VME) FeatureBits |= KF_V86_VIS | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_PSE) FeatureBits |= KF_LARGE_PAGE | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_TSC) FeatureBits |= KF_RDTSC;
    if (CpuInfo.Edx & X86_FEATURE_CX8) FeatureBits |= KF_CMPXCHG8B;
    if (CpuInfo.Edx & X86_FEATURE_SYSCALL) FeatureBits |= KF_FAST_SYSCALL;
    if (CpuInfo.Edx & X86_FEATURE_MTTR) FeatureBits |= KF_MTRR; /* NOTE(review): "MTTR" spelling comes from the header; presumably the MTRR flag — confirm */
    if (CpuInfo.Edx & X86_FEATURE_PGE) FeatureBits |= KF_GLOBAL_PAGE | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_CMOV) FeatureBits |= KF_CMOV;
    if (CpuInfo.Edx & X86_FEATURE_PAT) FeatureBits |= KF_PAT;
    if (CpuInfo.Edx & X86_FEATURE_DS) FeatureBits |= KF_DTS;
    if (CpuInfo.Edx & X86_FEATURE_MMX) FeatureBits |= KF_MMX;
    if (CpuInfo.Edx & X86_FEATURE_FXSR) FeatureBits |= KF_FXSR;
    if (CpuInfo.Edx & X86_FEATURE_SSE) FeatureBits |= KF_XMMI;
    if (CpuInfo.Edx & X86_FEATURE_SSE2) FeatureBits |= KF_XMMI64;

    /* ECX feature bits; the disabled ones have no KF_* mapping wired up yet */
    if (CpuInfo.Ecx & X86_FEATURE_SSE3) FeatureBits |= KF_SSE3;
    //if (CpuInfo.Ecx & X86_FEATURE_MONITOR) FeatureBits |= KF_MONITOR;
    //if (CpuInfo.Ecx & X86_FEATURE_SSSE3) FeatureBits |= KF_SSE3SUP;
    if (CpuInfo.Ecx & X86_FEATURE_CX16) FeatureBits |= KF_CMPXCHG16B;
    //if (CpuInfo.Ecx & X86_FEATURE_SSE41) FeatureBits |= KF_SSE41;
    //if (CpuInfo.Ecx & X86_FEATURE_POPCNT) FeatureBits |= KF_POPCNT;
    if (CpuInfo.Ecx & X86_FEATURE_XSAVE) FeatureBits |= KF_XSTATE;

    /* Check if the CPU reports hyper-threading */
    if (CpuInfo.Edx & X86_FEATURE_HT)
    {
        /* Set the number of logical CPUs per package (EBX bits 23:16) */
        Prcb->LogicalProcessorsPerPhysicalProcessor = (UCHAR)(CpuInfo.Ebx >> 16);
        if (Prcb->LogicalProcessorsPerPhysicalProcessor > 1)
        {
            /* More than one logical processor per package: SMT is present */
            KiSMTProcessorsPresent = TRUE;
        }
    }
    else
    {
        /* Only a single logical CPU per package */
        Prcb->LogicalProcessorsPerPhysicalProcessor = 1;
    }

    /* Check whether extended CPUID leaves (0x800000xx) are reported at all */
    KiCpuId(&CpuInfo, 0x80000000);
    if ((CpuInfo.Eax & 0xffffff00) == 0x80000000)
    {
        /* Check if CPUID 0x80000001 is supported */
        if (CpuInfo.Eax >= 0x80000001)
        {
            /* Check which extended features are available. */
            KiCpuId(&CpuInfo, 0x80000001);

            /* Check if NX-bit is supported */
            if (CpuInfo.Edx & X86_FEATURE_NX) FeatureBits |= KF_NX_BIT;

            /* Now handle vendor-specific extended features */
            switch (Vendor)
            {
                case CPU_AMD:
                    /* Extended EDX bit 31: 3DNow! */
                    if (CpuInfo.Edx & 0x80000000) FeatureBits |= KF_3DNOW;
                    break;
            }
        }
    }

    /* Return the Feature Bits */
    return FeatureBits;
}
201 
202 VOID
203 NTAPI
204 KiGetCacheInformation(VOID)
205 {
206     PKIPCR Pcr = (PKIPCR)KeGetPcr();
207     ULONG Vendor;
208     ULONG CacheRequests = 0, i;
209     ULONG CurrentRegister;
210     UCHAR RegisterByte;
211     BOOLEAN FirstPass = TRUE;
212     CPU_INFO CpuInfo;
213 
214     /* Set default L2 size */
215     Pcr->SecondLevelCacheSize = 0;
216 
217     /* Get the Vendor ID and make sure we support CPUID */
218     Vendor = KiGetCpuVendor();
219     if (!Vendor) return;
220 
221     /* Check the Vendor ID */
222     switch (Vendor)
223     {
224         /* Handle Intel case */
225         case CPU_INTEL:
226 
227             /*Check if we support CPUID 2 */
228             KiCpuId(&CpuInfo, 0);
229             if (CpuInfo.Eax >= 2)
230             {
231                 /* We need to loop for the number of times CPUID will tell us to */
232                 do
233                 {
234                     /* Do the CPUID call */
235                     KiCpuId(&CpuInfo, 2);
236 
237                     /* Check if it was the first call */
238                     if (FirstPass)
239                     {
240                         /*
241                          * The number of times to loop is the first byte. Read
242                          * it and then destroy it so we don't get confused.
243                          */
244                         CacheRequests = CpuInfo.Eax & 0xFF;
245                         CpuInfo.Eax &= 0xFFFFFF00;
246 
247                         /* Don't go over this again */
248                         FirstPass = FALSE;
249                     }
250 
251                     /* Loop all 4 registers */
252                     for (i = 0; i < 4; i++)
253                     {
254                         /* Get the current register */
255                         CurrentRegister = CpuInfo.AsUINT32[i];
256 
257                         /*
258                          * If the upper bit is set, then this register should
259                          * be skipped.
260                          */
261                         if (CurrentRegister & 0x80000000) continue;
262 
263                         /* Keep looping for every byte inside this register */
264                         while (CurrentRegister)
265                         {
266                             /* Read a byte, skip a byte. */
267                             RegisterByte = (UCHAR)(CurrentRegister & 0xFF);
268                             CurrentRegister >>= 8;
269                             if (!RegisterByte) continue;
270 
271                             /*
272                              * Valid values are from 0x40 (0 bytes) to 0x49
273                              * (32MB), or from 0x80 to 0x89 (same size but
274                              * 8-way associative.
275                              */
276                             if (((RegisterByte > 0x40) &&
277                                  (RegisterByte <= 0x49)) ||
278                                 ((RegisterByte > 0x80) &&
279                                 (RegisterByte <= 0x89)))
280                             {
281                                 /* Mask out only the first nibble */
282                                 RegisterByte &= 0x0F;
283 
284                                 /* Set the L2 Cache Size */
285                                 Pcr->SecondLevelCacheSize = 0x10000 <<
286                                                             RegisterByte;
287                             }
288                         }
289                     }
290                 } while (--CacheRequests);
291             }
292             break;
293 
294         case CPU_AMD:
295 
296             /* Check if we support CPUID 0x80000006 */
297             KiCpuId(&CpuInfo, 0x80000000);
298             if (CpuInfo.Eax >= 6)
299             {
300                 /* Get 2nd level cache and tlb size */
301                 KiCpuId(&CpuInfo, 0x80000006);
302 
303                 /* Set the L2 Cache Size */
304                 Pcr->SecondLevelCacheSize = (CpuInfo.Ecx & 0xFFFF0000) >> 6;
305             }
306             break;
307     }
308 }
309 
310 VOID
311 NTAPI
312 KeFlushCurrentTb(VOID)
313 {
314     /* Flush the TLB by resetting CR3 */
315     __writecr3(__readcr3());
316 }
317 
/**
 * Restores the special (control) register state previously captured by
 * KiSaveProcessorControlState() onto the current processor.
 *
 * @param ProcessorState - state block whose SpecialRegisters are loaded.
 *
 * NOTE: CR2, LDTR, TR, MXCSR and the debug-control / last-branch values
 * are not restored yet — see the commented-out lines and FIXME below.
 */
VOID
NTAPI
KiRestoreProcessorControlState(PKPROCESSOR_STATE ProcessorState)
{
    /* Restore the CR registers */
    __writecr0(ProcessorState->SpecialRegisters.Cr0);
//    __writecr2(ProcessorState->SpecialRegisters.Cr2);
    __writecr3(ProcessorState->SpecialRegisters.Cr3);
    __writecr4(ProcessorState->SpecialRegisters.Cr4);
    __writecr8(ProcessorState->SpecialRegisters.Cr8);

    /* Restore the DR registers */
    __writedr(0, ProcessorState->SpecialRegisters.KernelDr0);
    __writedr(1, ProcessorState->SpecialRegisters.KernelDr1);
    __writedr(2, ProcessorState->SpecialRegisters.KernelDr2);
    __writedr(3, ProcessorState->SpecialRegisters.KernelDr3);
    __writedr(6, ProcessorState->SpecialRegisters.KernelDr6);
    __writedr(7, ProcessorState->SpecialRegisters.KernelDr7);

    /* Restore GDT and IDT (LDT and TSS reload not implemented yet) */
    __lgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
//    __lldt(&ProcessorState->SpecialRegisters.Ldtr);
//    __ltr(&ProcessorState->SpecialRegisters.Tr);
    __lidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

//    __ldmxcsr(&ProcessorState->SpecialRegisters.MxCsr); // FIXME
//    ProcessorState->SpecialRegisters.DebugControl
//    ProcessorState->SpecialRegisters.LastBranchToRip
//    ProcessorState->SpecialRegisters.LastBranchFromRip
//    ProcessorState->SpecialRegisters.LastExceptionToRip
//    ProcessorState->SpecialRegisters.LastExceptionFromRip

    /* Restore the GS base and SYSCALL MSRs */
    __writemsr(X86_MSR_GSBASE, ProcessorState->SpecialRegisters.MsrGsBase);
    __writemsr(X86_MSR_KERNEL_GSBASE, ProcessorState->SpecialRegisters.MsrGsSwap);
    __writemsr(X86_MSR_STAR, ProcessorState->SpecialRegisters.MsrStar);
    __writemsr(X86_MSR_LSTAR, ProcessorState->SpecialRegisters.MsrLStar);
    __writemsr(X86_MSR_CSTAR, ProcessorState->SpecialRegisters.MsrCStar);
    __writemsr(X86_MSR_SFMASK, ProcessorState->SpecialRegisters.MsrSyscallMask);

}
359 
/**
 * Captures the current processor's special (control) register state into
 * the given KPROCESSOR_STATE, for later restoration by
 * KiRestoreProcessorControlState().
 *
 * @param ProcessorState - receives the SpecialRegisters snapshot.
 *
 * NOTE: MXCSR, DebugControl and the last-branch/last-exception values are
 * not captured yet — see the commented-out lines below.
 */
VOID
NTAPI
KiSaveProcessorControlState(OUT PKPROCESSOR_STATE ProcessorState)
{
    /* Save the CR registers */
    ProcessorState->SpecialRegisters.Cr0 = __readcr0();
    ProcessorState->SpecialRegisters.Cr2 = __readcr2();
    ProcessorState->SpecialRegisters.Cr3 = __readcr3();
    ProcessorState->SpecialRegisters.Cr4 = __readcr4();
    ProcessorState->SpecialRegisters.Cr8 = __readcr8();

    /* Save the DR registers */
    ProcessorState->SpecialRegisters.KernelDr0 = __readdr(0);
    ProcessorState->SpecialRegisters.KernelDr1 = __readdr(1);
    ProcessorState->SpecialRegisters.KernelDr2 = __readdr(2);
    ProcessorState->SpecialRegisters.KernelDr3 = __readdr(3);
    ProcessorState->SpecialRegisters.KernelDr6 = __readdr(6);
    ProcessorState->SpecialRegisters.KernelDr7 = __readdr(7);

    /* Save GDT, IDT, LDT and TSS selectors/descriptors */
    __sgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
    __sldt(&ProcessorState->SpecialRegisters.Ldtr);
    __str(&ProcessorState->SpecialRegisters.Tr);
    __sidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

//    __stmxcsr(&ProcessorState->SpecialRegisters.MxCsr);
//    ProcessorState->SpecialRegisters.DebugControl =
//    ProcessorState->SpecialRegisters.LastBranchToRip =
//    ProcessorState->SpecialRegisters.LastBranchFromRip =
//    ProcessorState->SpecialRegisters.LastExceptionToRip =
//    ProcessorState->SpecialRegisters.LastExceptionFromRip =

    /* Save the GS base and SYSCALL MSRs */
    ProcessorState->SpecialRegisters.MsrGsBase = __readmsr(X86_MSR_GSBASE);
    ProcessorState->SpecialRegisters.MsrGsSwap = __readmsr(X86_MSR_KERNEL_GSBASE);
    ProcessorState->SpecialRegisters.MsrStar = __readmsr(X86_MSR_STAR);
    ProcessorState->SpecialRegisters.MsrLStar = __readmsr(X86_MSR_LSTAR);
    ProcessorState->SpecialRegisters.MsrCStar = __readmsr(X86_MSR_CSTAR);
    ProcessorState->SpecialRegisters.MsrSyscallMask = __readmsr(X86_MSR_SFMASK);
}
400 
401 VOID
402 NTAPI
403 KeFlushEntireTb(IN BOOLEAN Invalid,
404                 IN BOOLEAN AllProcessors)
405 {
406     KIRQL OldIrql;
407 
408     // FIXME: halfplemented
409     /* Raise the IRQL for the TB Flush */
410     OldIrql = KeRaiseIrqlToSynchLevel();
411 
412     /* Flush the TB for the Current CPU, and update the flush stamp */
413     KeFlushCurrentTb();
414 
415     /* Update the flush stamp and return to original IRQL */
416     InterlockedExchangeAdd(&KiTbFlushTimeStamp, 1);
417     KeLowerIrql(OldIrql);
418 
419 }
420 
421 KAFFINITY
422 NTAPI
423 KeQueryActiveProcessors(VOID)
424 {
425     PAGED_CODE();
426 
427     /* Simply return the number of active processors */
428     return KeActiveProcessors;
429 }
430 
431 NTSTATUS
432 NTAPI
433 KxSaveFloatingPointState(OUT PKFLOATING_SAVE FloatingState)
434 {
435     UNREFERENCED_PARAMETER(FloatingState);
436     return STATUS_SUCCESS;
437 }
438 
439 NTSTATUS
440 NTAPI
441 KxRestoreFloatingPointState(IN PKFLOATING_SAVE FloatingState)
442 {
443     UNREFERENCED_PARAMETER(FloatingState);
444     return STATUS_SUCCESS;
445 }
446 
447 BOOLEAN
448 NTAPI
449 KeInvalidateAllCaches(VOID)
450 {
451     /* Invalidate all caches */
452     __wbinvd();
453     return TRUE;
454 }
455 
456 /*
457  * @implemented
458  */
459 ULONG
460 NTAPI
461 KeGetRecommendedSharedDataAlignment(VOID)
462 {
463     /* Return the global variable */
464     return KeLargestCacheLine;
465 }
466 
/*
 * @implemented
 */
/**
 * Captures the full state of the current processor for hibernation:
 * first the register context, then the special/control registers.
 *
 * @param State - receives both the context frame and the special
 *                register snapshot.
 */
VOID
__cdecl
KeSaveStateForHibernate(IN PKPROCESSOR_STATE State)
{
    /* Capture the context (general-purpose register frame) */
    RtlCaptureContext(&State->ContextFrame);

    /* Capture the control state (CRx, DRx, descriptor tables, MSRs) */
    KiSaveProcessorControlState(State);
}
480 
481 /*
482  * @implemented
483  */
484 VOID
485 NTAPI
486 KeSetDmaIoCoherency(IN ULONG Coherency)
487 {
488     /* Save the coherency globally */
489     KiDmaIoCoherency = Coherency;
490 }
491