/*
 * PROJECT:     ReactOS Kernel
 * LICENSE:     GPL - See COPYING in the top level directory
 * FILE:        ntoskrnl/ke/amd64/cpu.c
 * PURPOSE:     Routines for CPU-level support
 * PROGRAMMERS: Alex Ionescu (alex.ionescu@reactos.org)
 *              Timo Kreuzer (timo.kreuzer@reactos.org)
 */

/* INCLUDES *****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* GLOBALS *******************************************************************/

/* The Boot TSS */
KTSS64 KiBootTss;

/* CPU Features and Flags */
ULONG KeI386CpuType;
ULONG KeI386CpuStep;
ULONG KeI386MachineType;
/* FPU (NPX) is assumed present; never cleared in this file */
ULONG KeI386NpxPresent = 1;
/* Default cache-line size of 64 bytes; returned by
   KeGetRecommendedSharedDataAlignment() below */
ULONG KeLargestCacheLine = 0x40;
ULONG KiDmaIoCoherency = 0;
/* Set TRUE by KiGetFeatureBits() when CPUID reports >1 logical CPU per package */
BOOLEAN KiSMTProcessorsPresent;

/* Flush data */
/* Incremented on every full TLB flush (see KeFlushEntireTb) */
volatile LONG KiTbFlushTimeStamp;

/* CPU Signatures */
static const CHAR CmpIntelID[] = "GenuineIntel";
static const CHAR CmpAmdID[] = "AuthenticAMD";
static const CHAR CmpCentaurID[] = "CentaurHauls";

/* Bit-field view of the CPUID leaf 1 EAX processor signature */
typedef union _CPU_SIGNATURE
{
    struct
    {
        ULONG Step : 4;
        ULONG Model : 4;
        ULONG Family : 4;
        ULONG Unused : 4;
        ULONG ExtendedModel : 4;
        ULONG ExtendedFamily : 8;
        ULONG Unused2 : 4;
    };
    ULONG AsULONG;
} CPU_SIGNATURE;

/* FUNCTIONS *****************************************************************/

/*
 * Reads the CPU vendor string via CPUID leaf 0, stores it (NUL-terminated)
 * in the current PRCB's VendorString, sets Prcb->CpuVendor accordingly,
 * and returns that vendor value (CPU_INTEL / CPU_AMD / CPU_VIA / CPU_UNKNOWN).
 */
ULONG
NTAPI
KiGetCpuVendor(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    CPU_INFO CpuInfo;

    /* Get the Vendor ID (CPUID leaf 0 returns it split across EBX/EDX/ECX) */
    KiCpuId(&CpuInfo, 0);

    /* Copy it to the PRCB and null-terminate it */
    *(ULONG*)&Prcb->VendorString[0] = CpuInfo.Ebx;
    *(ULONG*)&Prcb->VendorString[4] = CpuInfo.Edx;
    *(ULONG*)&Prcb->VendorString[8] = CpuInfo.Ecx;
    Prcb->VendorString[12] = 0;

    /* Now check the CPU Type */
    if (!strcmp((PCHAR)Prcb->VendorString, CmpIntelID))
    {
        Prcb->CpuVendor = CPU_INTEL;
    }
    else if (!strcmp((PCHAR)Prcb->VendorString, CmpAmdID))
    {
        Prcb->CpuVendor = CPU_AMD;
    }
    else if (!strcmp((PCHAR)Prcb->VendorString, CmpCentaurID))
    {
        DPRINT1("VIA CPUs not fully supported\n");
        Prcb->CpuVendor = CPU_VIA;
    }
    else
    {
        /* Invalid CPU */
        DPRINT1("%s CPU support not fully tested!\n", Prcb->VendorString);
        Prcb->CpuVendor = CPU_UNKNOWN;
    }

    return Prcb->CpuVendor;
}

/*
 * Decodes the CPUID leaf 1 signature and fills in the PRCB's
 * CpuID / CpuType / CpuStep fields. Also initializes Prcb->CpuVendor
 * as a side effect of calling KiGetCpuVendor().
 */
VOID
NTAPI
KiSetProcessorType(VOID)
{
    CPU_INFO CpuInfo;
    CPU_SIGNATURE CpuSignature;
    BOOLEAN ExtendModel;
    ULONG Stepping, Type, Vendor;

    /* This initializes Prcb->CpuVendor */
    Vendor = KiGetCpuVendor();

    /* Do CPUID 1 now */
    KiCpuId(&CpuInfo, 1);

    /*
     * Get the Stepping and Type. The stepping contains both the
     * Model and the Step, while the Type contains the returned Family.
     *
     * For the stepping, we convert this: zzzzzzxy into this: x0y
     */
    CpuSignature.AsULONG = CpuInfo.Eax;
    Stepping = CpuSignature.Model;
    /* Family 0xF always uses the extended model field */
    ExtendModel = (CpuSignature.Family == 15);
#if ( (NTDDI_VERSION >= NTDDI_WINXPSP2) && (NTDDI_VERSION < NTDDI_WS03) ) || (NTDDI_VERSION >= NTDDI_WS03SP1)
    if (CpuSignature.Family == 6)
    {
        /* Intel (and, for Win8+, Centaur) family 6 also uses ExtendedModel */
        ExtendModel |= (Vendor == CPU_INTEL);
#if (NTDDI_VERSION >= NTDDI_WIN8)
        ExtendModel |= (Vendor == CPU_CENTAUR);
#endif
    }
#endif
    if (ExtendModel)
    {
        /* Add ExtendedModel to distinguish from non-extended values. */
        Stepping |= (CpuSignature.ExtendedModel << 4);
    }
    Stepping = (Stepping << 8) | CpuSignature.Step;
    Type = CpuSignature.Family;
    if (CpuSignature.Family == 15)
    {
        /* Add ExtendedFamily to distinguish from non-extended values.
         * It must not be larger than 0xF0 to avoid overflow. */
        Type += min(CpuSignature.ExtendedFamily, 0xF0);
    }

    /* Save them in the PRCB */
    KeGetCurrentPrcb()->CpuID = TRUE;
    KeGetCurrentPrcb()->CpuType = (UCHAR)Type;
    KeGetCurrentPrcb()->CpuStep = (USHORT)Stepping;
}

/*
 * Translates the CPUID feature flags (leaf 1 EDX/ECX and extended leaf
 * 0x80000001 EDX) into the kernel's KF_* feature-bit format and returns
 * them. Also records the initial APIC ID and the logical-processor count
 * in the PRCB. Requires Prcb->CpuVendor to be initialized already
 * (KiGetCpuVendor / KiSetProcessorType).
 */
ULONG
NTAPI
KiGetFeatureBits(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    ULONG Vendor;
    ULONG FeatureBits = KF_WORKING_PTE;
    CPU_INFO CpuInfo;

    /* Get the Vendor ID */
    Vendor = Prcb->CpuVendor;

    /* Make sure we got a valid vendor ID at least. */
    if (!Vendor) return FeatureBits;

    /* Get the CPUID Info. */
    KiCpuId(&CpuInfo, 1);

    /* Set the initial APIC ID (CPUID.1:EBX[31:24]) */
    Prcb->InitialApicId = (UCHAR)(CpuInfo.Ebx >> 24);

    /* Convert all CPUID Feature bits into our format */
    if (CpuInfo.Edx & X86_FEATURE_VME) FeatureBits |= KF_V86_VIS | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_PSE) FeatureBits |= KF_LARGE_PAGE | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_TSC) FeatureBits |= KF_RDTSC;
    if (CpuInfo.Edx & X86_FEATURE_CX8) FeatureBits |= KF_CMPXCHG8B;
    if (CpuInfo.Edx & X86_FEATURE_SYSCALL) FeatureBits |= KF_FAST_SYSCALL;
    if (CpuInfo.Edx & X86_FEATURE_MTTR) FeatureBits |= KF_MTRR;
    if (CpuInfo.Edx & X86_FEATURE_PGE) FeatureBits |= KF_GLOBAL_PAGE | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_CMOV) FeatureBits |= KF_CMOV;
    if (CpuInfo.Edx & X86_FEATURE_PAT) FeatureBits |= KF_PAT;
    if (CpuInfo.Edx & X86_FEATURE_DS) FeatureBits |= KF_DTS;
    if (CpuInfo.Edx & X86_FEATURE_MMX) FeatureBits |= KF_MMX;
    if (CpuInfo.Edx & X86_FEATURE_FXSR) FeatureBits |= KF_FXSR;
    if (CpuInfo.Edx & X86_FEATURE_SSE) FeatureBits |= KF_XMMI;
    if (CpuInfo.Edx & X86_FEATURE_SSE2) FeatureBits |= KF_XMMI64;

    if (CpuInfo.Ecx & X86_FEATURE_SSE3) FeatureBits |= KF_SSE3;
    //if (CpuInfo.Ecx & X86_FEATURE_MONITOR) FeatureBits |= KF_MONITOR;
    //if (CpuInfo.Ecx & X86_FEATURE_SSSE3) FeatureBits |= KF_SSE3SUP;
    if (CpuInfo.Ecx & X86_FEATURE_CX16) FeatureBits |= KF_CMPXCHG16B;
    //if (CpuInfo.Ecx & X86_FEATURE_SSE41) FeatureBits |= KF_SSE41;
    //if (CpuInfo.Ecx & X86_FEATURE_POPCNT) FeatureBits |= KF_POPCNT;
    if (CpuInfo.Ecx & X86_FEATURE_XSAVE) FeatureBits |= KF_XSTATE;

    /* Check if the CPU has hyper-threading */
    if (CpuInfo.Edx & X86_FEATURE_HT)
    {
        /* Set the number of logical CPUs (CPUID.1:EBX[23:16]) */
        Prcb->LogicalProcessorsPerPhysicalProcessor = (UCHAR)(CpuInfo.Ebx >> 16);
        if (Prcb->LogicalProcessorsPerPhysicalProcessor > 1)
        {
            /* We're on dual-core */
            KiSMTProcessorsPresent = TRUE;
        }
    }
    else
    {
        /* We only have a single CPU */
        Prcb->LogicalProcessorsPerPhysicalProcessor = 1;
    }

    /* Check extended cpuid features: leaf 0x80000000 EAX returns the
       highest supported extended leaf (0x8000xxxx) */
    KiCpuId(&CpuInfo, 0x80000000);
    if ((CpuInfo.Eax & 0xffffff00) == 0x80000000)
    {
        /* Check if CPUID 0x80000001 is supported */
        if (CpuInfo.Eax >= 0x80000001)
        {
            /* Check which extended features are available. */
            KiCpuId(&CpuInfo, 0x80000001);

            /* Check if NX-bit is supported */
            if (CpuInfo.Edx & X86_FEATURE_NX) FeatureBits |= KF_NX_BIT;

            /* Now handle each features for each CPU Vendor */
            switch (Vendor)
            {
                case CPU_AMD:
                    /* Bit 31 of extended EDX is the AMD 3DNow! flag */
                    if (CpuInfo.Edx & 0x80000000) FeatureBits |= KF_3DNOW;
                    break;
            }
        }
    }

    /* Return the Feature Bits */
    return FeatureBits;
}

/*
 * Determines the second-level (L2) cache size and stores it (in bytes)
 * in Pcr->SecondLevelCacheSize. Uses the CPUID leaf 2 descriptor bytes
 * on Intel, and leaf 0x80000006 on AMD; leaves the default of 0 for
 * other vendors or when the needed leaves are unsupported.
 */
VOID
NTAPI
KiGetCacheInformation(VOID)
{
    PKIPCR Pcr = (PKIPCR)KeGetPcr();
    ULONG Vendor;
    ULONG CacheRequests = 0, i;
    ULONG CurrentRegister;
    UCHAR RegisterByte;
    BOOLEAN FirstPass = TRUE;
    CPU_INFO CpuInfo;

    /* Set default L2 size */
    Pcr->SecondLevelCacheSize = 0;

    /* Get the Vendor ID and make sure we support CPUID */
    Vendor = KiGetCpuVendor();
    if (!Vendor) return;

    /* Check the Vendor ID */
    switch (Vendor)
    {
        /* Handle Intel case */
        case CPU_INTEL:

            /*Check if we support CPUID 2 */
            KiCpuId(&CpuInfo, 0);
            if (CpuInfo.Eax >= 2)
            {
                /* We need to loop for the number of times CPUID will tell us to */
                do
                {
                    /* Do the CPUID call */
                    KiCpuId(&CpuInfo, 2);

                    /* Check if it was the first call */
                    if (FirstPass)
                    {
                        /*
                         * The number of times to loop is the first byte. Read
                         * it and then destroy it so we don't get confused.
                         */
                        CacheRequests = CpuInfo.Eax & 0xFF;
                        CpuInfo.Eax &= 0xFFFFFF00;

                        /* Don't go over this again */
                        FirstPass = FALSE;
                    }

                    /* Loop all 4 registers (EAX, EBX, ECX, EDX) */
                    for (i = 0; i < 4; i++)
                    {
                        /* Get the current register */
                        CurrentRegister = CpuInfo.AsUINT32[i];

                        /*
                         * If the upper bit is set, then this register should
                         * be skipped.
                         */
                        if (CurrentRegister & 0x80000000) continue;

                        /* Keep looping for every byte inside this register */
                        while (CurrentRegister)
                        {
                            /* Read a byte, skip a byte. */
                            RegisterByte = (UCHAR)(CurrentRegister & 0xFF);
                            CurrentRegister >>= 8;
                            if (!RegisterByte) continue;

                            /*
                             * Valid values are from 0x40 (0 bytes) to 0x49
                             * (32MB), or from 0x80 to 0x89 (same size but
                             * 8-way associative. */
                            if (((RegisterByte > 0x40) &&
                                 (RegisterByte <= 0x49)) ||
                                ((RegisterByte > 0x80) &&
                                 (RegisterByte <= 0x89)))
                            {
                                /* Mask out only the first nibble */
                                RegisterByte &= 0x0F;

                                /* Set the L2 Cache Size: 64KB << nibble,
                                   so 0x41 -> 128KB, 0x42 -> 256KB, ... */
                                Pcr->SecondLevelCacheSize = 0x10000 <<
                                                            RegisterByte;
                            }
                        }
                    }
                } while (--CacheRequests);
            }
            break;

        case CPU_AMD:

            /* Check if we support CPUID 0x80000006 */
            /* NOTE(review): EAX here holds the max extended leaf (a value of
               the form 0x8000xxxx), so ">= 6" is effectively always true;
               presumably ">= 0x80000006" was intended — confirm upstream. */
            KiCpuId(&CpuInfo, 0x80000000);
            if (CpuInfo.Eax >= 6)
            {
                /* Get 2nd level cache and tlb size */
                KiCpuId(&CpuInfo, 0x80000006);

                /* Set the L2 Cache Size: ECX[31:16] is the size in KB,
                   so (>> 16) * 1024 == (masked value) >> 6 bytes */
                Pcr->SecondLevelCacheSize = (CpuInfo.Ecx & 0xFFFF0000) >> 6;
            }
            break;
    }
}

/*
 * Flushes the entire TLB of the current processor (non-global entries)
 * by reloading CR3.
 */
VOID
NTAPI
KeFlushCurrentTb(VOID)
{
    /* Flush the TLB by resetting CR3 */
    __writecr3(__readcr3());
}

/*
 * Restores the processor control state previously captured by
 * KiSaveProcessorControlState: control registers, debug registers,
 * GDT/IDT, MXCSR and the syscall-related MSRs. LDT/TR reloads and the
 * last-branch/debug-control MSRs are currently not restored (see the
 * commented-out lines below).
 */
VOID
NTAPI
KiRestoreProcessorControlState(PKPROCESSOR_STATE ProcessorState)
{
    /* Restore the CR registers */
    __writecr0(ProcessorState->SpecialRegisters.Cr0);
//    __writecr2(ProcessorState->SpecialRegisters.Cr2);
    __writecr3(ProcessorState->SpecialRegisters.Cr3);
    __writecr4(ProcessorState->SpecialRegisters.Cr4);
    __writecr8(ProcessorState->SpecialRegisters.Cr8);

    /* Restore the DR registers */
    __writedr(0, ProcessorState->SpecialRegisters.KernelDr0);
    __writedr(1, ProcessorState->SpecialRegisters.KernelDr1);
    __writedr(2, ProcessorState->SpecialRegisters.KernelDr2);
    __writedr(3, ProcessorState->SpecialRegisters.KernelDr3);
    __writedr(6, ProcessorState->SpecialRegisters.KernelDr6);
    __writedr(7, ProcessorState->SpecialRegisters.KernelDr7);

    /* Restore GDT, IDT, LDT and TSS */
    __lgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
//    __lldt(&ProcessorState->SpecialRegisters.Ldtr);
//    __ltr(&ProcessorState->SpecialRegisters.Tr);
    __lidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

    _mm_setcsr(ProcessorState->SpecialRegisters.MxCsr);
//    ProcessorState->SpecialRegisters.DebugControl
//    ProcessorState->SpecialRegisters.LastBranchToRip
//    ProcessorState->SpecialRegisters.LastBranchFromRip
//    ProcessorState->SpecialRegisters.LastExceptionToRip
//    ProcessorState->SpecialRegisters.LastExceptionFromRip

    /* Restore MSRs */
    __writemsr(X86_MSR_GSBASE, ProcessorState->SpecialRegisters.MsrGsBase);
    __writemsr(X86_MSR_KERNEL_GSBASE, ProcessorState->SpecialRegisters.MsrGsSwap);
    __writemsr(X86_MSR_STAR, ProcessorState->SpecialRegisters.MsrStar);
    __writemsr(X86_MSR_LSTAR, ProcessorState->SpecialRegisters.MsrLStar);
    __writemsr(X86_MSR_CSTAR, ProcessorState->SpecialRegisters.MsrCStar);
    __writemsr(X86_MSR_SFMASK, ProcessorState->SpecialRegisters.MsrSyscallMask);

}

/*
 * Captures the current processor control state into ProcessorState:
 * control registers, debug registers, GDT/IDT/LDT/TR selectors, MXCSR
 * and the syscall-related MSRs. The last-branch/debug-control fields
 * are currently not captured (see the commented-out lines below).
 * Counterpart of KiRestoreProcessorControlState.
 */
VOID
NTAPI
KiSaveProcessorControlState(OUT PKPROCESSOR_STATE ProcessorState)
{
    /* Save the CR registers */
    ProcessorState->SpecialRegisters.Cr0 = __readcr0();
    ProcessorState->SpecialRegisters.Cr2 = __readcr2();
    ProcessorState->SpecialRegisters.Cr3 = __readcr3();
    ProcessorState->SpecialRegisters.Cr4 = __readcr4();
    ProcessorState->SpecialRegisters.Cr8 = __readcr8();

    /* Save the DR registers */
    ProcessorState->SpecialRegisters.KernelDr0 = __readdr(0);
    ProcessorState->SpecialRegisters.KernelDr1 = __readdr(1);
    ProcessorState->SpecialRegisters.KernelDr2 = __readdr(2);
    ProcessorState->SpecialRegisters.KernelDr3 = __readdr(3);
    ProcessorState->SpecialRegisters.KernelDr6 = __readdr(6);
    ProcessorState->SpecialRegisters.KernelDr7 = __readdr(7);

    /* Save GDT, IDT, LDT and TSS */
    __sgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
    __sldt(&ProcessorState->SpecialRegisters.Ldtr);
    __str(&ProcessorState->SpecialRegisters.Tr);
    __sidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

    ProcessorState->SpecialRegisters.MxCsr = _mm_getcsr();
//    ProcessorState->SpecialRegisters.DebugControl =
//    ProcessorState->SpecialRegisters.LastBranchToRip =
//    ProcessorState->SpecialRegisters.LastBranchFromRip =
//    ProcessorState->SpecialRegisters.LastExceptionToRip =
//    ProcessorState->SpecialRegisters.LastExceptionFromRip =

    /* Save MSRs */
    ProcessorState->SpecialRegisters.MsrGsBase = __readmsr(X86_MSR_GSBASE);
    ProcessorState->SpecialRegisters.MsrGsSwap = __readmsr(X86_MSR_KERNEL_GSBASE);
    ProcessorState->SpecialRegisters.MsrStar = __readmsr(X86_MSR_STAR);
    ProcessorState->SpecialRegisters.MsrLStar = __readmsr(X86_MSR_LSTAR);
    ProcessorState->SpecialRegisters.MsrCStar = __readmsr(X86_MSR_CSTAR);
    ProcessorState->SpecialRegisters.MsrSyscallMask = __readmsr(X86_MSR_SFMASK);
}

/*
 * Flushes the entire TLB.
 * NOTE(review): both parameters are currently ignored (see the FIXME) —
 * only the local processor's TLB is flushed, regardless of AllProcessors,
 * and Invalid is unused.
 */
VOID
NTAPI
KeFlushEntireTb(IN BOOLEAN Invalid,
                IN BOOLEAN AllProcessors)
{
    KIRQL OldIrql;

    // FIXME: halfplemented
    /* Raise the IRQL for the TB Flush */
    OldIrql = KeRaiseIrqlToSynchLevel();

    /* Flush the TB for the Current CPU, and update the flush stamp */
    KeFlushCurrentTb();

    /* Update the flush stamp and return to original IRQL */
    InterlockedExchangeAdd(&KiTbFlushTimeStamp, 1);
    KeLowerIrql(OldIrql);

}

/*
 * Returns the affinity mask of all active processors.
 * Must be called at PASSIVE_LEVEL (PAGED_CODE).
 */
KAFFINITY
NTAPI
KeQueryActiveProcessors(VOID)
{
    PAGED_CODE();

    /* Simply return the number of active processors */
    return KeActiveProcessors;
}

/*
 * No-op on amd64: floating-point state does not need explicit saving
 * here, so this always succeeds without touching FloatingState.
 */
NTSTATUS
NTAPI
KxSaveFloatingPointState(OUT PKFLOATING_SAVE FloatingState)
{
    UNREFERENCED_PARAMETER(FloatingState);
    return STATUS_SUCCESS;
}

/*
 * No-op counterpart of KxSaveFloatingPointState; always succeeds.
 */
NTSTATUS
NTAPI
KxRestoreFloatingPointState(IN PKFLOATING_SAVE FloatingState)
{
    UNREFERENCED_PARAMETER(FloatingState);
    return STATUS_SUCCESS;
}

/*
 * Writes back and invalidates all processor caches (WBINVD).
 * Always returns TRUE.
 */
BOOLEAN
NTAPI
KeInvalidateAllCaches(VOID)
{
    /* Invalidate all caches */
    __wbinvd();
    return TRUE;
}

/*
 * @implemented
 */
ULONG
NTAPI
KeGetRecommendedSharedDataAlignment(VOID)
{
    /* Return the global variable (largest cache-line size, in bytes) */
    return KeLargestCacheLine;
}

/*
 * @implemented
 */
VOID
__cdecl
KeSaveStateForHibernate(IN PKPROCESSOR_STATE State)
{
    /* Capture the context */
    RtlCaptureContext(&State->ContextFrame);

    /* Capture the control state */
    KiSaveProcessorControlState(State);
}

/*
 * @implemented
 */
VOID
NTAPI
KeSetDmaIoCoherency(IN ULONG Coherency)
{
    /* Save the coherency globally */
    KiDmaIoCoherency = Coherency;
}