/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         GPL - See COPYING in the top level directory
 * FILE:            ntoskrnl/ke/amd64/cpu.c
 * PURPOSE:         Routines for CPU-level support
 * PROGRAMMERS:     Alex Ionescu (alex.ionescu@reactos.org)
 *                  Timo Kreuzer (timo.kreuzer@reactos.org)
 */

/* INCLUDES *****************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

/* FIXME: Local EFLAGS defines not used anywhere else */
/* NOTE(review): 0x4000 is architecturally the NT (Nested Task) flag; the
   name "NF" here is presumably a historical misnomer — confirm before use. */
#define EFLAGS_IOPL 0x3000
#define EFLAGS_NF 0x4000
#define EFLAGS_RF 0x10000
#define EFLAGS_ID 0x200000

/* GLOBALS *******************************************************************/

/* The Boot TSS */
KTSS64 KiBootTss;

/* CPU Features and Flags */
ULONG KeI386CpuType;
ULONG KeI386CpuStep;
ULONG KeI386MachineType;
ULONG KeI386NpxPresent = 1;     /* x87 FPU always present on amd64 */
ULONG KeLargestCacheLine = 0x40; /* Default cache-line size: 64 bytes */
ULONG KiDmaIoCoherency = 0;
BOOLEAN KiSMTProcessorsPresent;

/* Freeze data */
KIRQL KiOldIrql;
ULONG KiFreezeFlag;

/* Flush data */
volatile LONG KiTbFlushTimeStamp;

/* CPU Signatures: the 12-byte vendor strings returned by CPUID leaf 0 */
static const CHAR CmpIntelID[]     = "GenuineIntel";
static const CHAR CmpAmdID[]       = "AuthenticAMD";
static const CHAR CmpCyrixID[]     = "CyrixInstead";
static const CHAR CmpTransmetaID[] = "GenuineTMx86";
static const CHAR CmpCentaurID[]   = "CentaurHauls";
static const CHAR CmpRiseID[]      = "RiseRiseRise";

/* FUNCTIONS *****************************************************************/

/*
 * Queries CPUID leaf 1 and stores the CPU type and stepping in the
 * current processor's PRCB (CpuID, CpuType, CpuStep).
 */
CODE_SEG("INIT")
VOID
NTAPI
KiSetProcessorType(VOID)
{
    CPU_INFO CpuInfo;
    ULONG Stepping, Type;

    /* Do CPUID 1 now */
    KiCpuId(&CpuInfo, 1);

    /*
     * Get the Stepping and Type. The stepping contains both the
     * Model and the Step, while the Type contains the returned Type.
     * We ignore the family.
     *
     * For the stepping, we convert this: zzzzzzxy into this: x0y
     */
    Stepping = CpuInfo.Eax & 0xF0;       /* model nibble (x) */
    Stepping <<= 4;                      /* shift model into bits 8-11 */
    Stepping += (CpuInfo.Eax & 0xFF);    /* add stepping nibble (y) */
    Stepping &= 0xF0F;                   /* keep only x0y */
    Type = CpuInfo.Eax & 0xF00;          /* family field of EAX */
    Type >>= 8;

    /* Save them in the PRCB */
    KeGetCurrentPrcb()->CpuID = TRUE;
    KeGetCurrentPrcb()->CpuType = (UCHAR)Type;
    KeGetCurrentPrcb()->CpuStep = (USHORT)Stepping;
}

/*
 * Reads the CPUID vendor string into the PRCB's VendorString field and
 * returns the matching CPU_* vendor constant (CPU_UNKNOWN if unrecognized,
 * 0 for Rise CPUs).
 */
ULONG
NTAPI
KiGetCpuVendor(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    CPU_INFO CpuInfo;

    /* Get the Vendor ID and null-terminate it */
    KiCpuId(&CpuInfo, 0);

    /* Copy it to the PRCB and null-terminate it.
       Note: CPUID returns the 12-byte vendor string in EBX, EDX, ECX order. */
    *(ULONG*)&Prcb->VendorString[0] = CpuInfo.Ebx;
    *(ULONG*)&Prcb->VendorString[4] = CpuInfo.Edx;
    *(ULONG*)&Prcb->VendorString[8] = CpuInfo.Ecx;
    Prcb->VendorString[12] = 0;

    /* Now check the CPU Type */
    if (!strcmp((PCHAR)Prcb->VendorString, CmpIntelID))
    {
        return CPU_INTEL;
    }
    else if (!strcmp((PCHAR)Prcb->VendorString, CmpAmdID))
    {
        return CPU_AMD;
    }
    else if (!strcmp((PCHAR)Prcb->VendorString, CmpCentaurID))
    {
        DPRINT1("VIA CPUs not fully supported\n");
        return CPU_VIA;
    }
    else if (!strcmp((PCHAR)Prcb->VendorString, CmpRiseID))
    {
        DPRINT1("Rise CPUs not fully supported\n");
        /* NOTE(review): returns 0 (no CPU_* constant) for Rise, unlike the
           CPU_UNKNOWN fallthrough below — callers treat 0 as "no vendor". */
        return 0;
    }

    /* Invalid CPU */
    return CPU_UNKNOWN;
}

/*
 * Translates the CPUID feature flags (standard leaf 1 and extended leaf
 * 0x80000001) into the kernel's KF_* feature-bit format. Also records the
 * initial APIC ID and logical-processor count in the PRCB, and sets
 * KiSMTProcessorsPresent when hyper-threading is detected.
 *
 * Returns the accumulated KF_* feature bits (at minimum KF_WORKING_PTE).
 */
CODE_SEG("INIT")
ULONG
NTAPI
KiGetFeatureBits(VOID)
{
    PKPRCB Prcb = KeGetCurrentPrcb();
    ULONG Vendor;
    ULONG FeatureBits = KF_WORKING_PTE;
    CPU_INFO CpuInfo;

    /* Get the Vendor ID */
    Vendor = KiGetCpuVendor();

    /* Make sure we got a valid vendor ID at least. */
    if (!Vendor) return FeatureBits;

    /* Get the CPUID Info. */
    KiCpuId(&CpuInfo, 1);

    /* Set the initial APIC ID (bits 24-31 of EBX on leaf 1) */
    Prcb->InitialApicId = (UCHAR)(CpuInfo.Ebx >> 24);

    /* Convert all CPUID Feature bits into our format */
    if (CpuInfo.Edx & X86_FEATURE_VME) FeatureBits |= KF_V86_VIS | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_PSE) FeatureBits |= KF_LARGE_PAGE | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_TSC) FeatureBits |= KF_RDTSC;
    if (CpuInfo.Edx & X86_FEATURE_CX8) FeatureBits |= KF_CMPXCHG8B;
    if (CpuInfo.Edx & X86_FEATURE_SYSCALL) FeatureBits |= KF_FAST_SYSCALL;
    /* NOTE(review): "MTTR" is presumably a typo for MTRR in the header that
       declares this macro — kept as-is since the macro is defined elsewhere. */
    if (CpuInfo.Edx & X86_FEATURE_MTTR) FeatureBits |= KF_MTRR;
    if (CpuInfo.Edx & X86_FEATURE_PGE) FeatureBits |= KF_GLOBAL_PAGE | KF_CR4;
    if (CpuInfo.Edx & X86_FEATURE_CMOV) FeatureBits |= KF_CMOV;
    if (CpuInfo.Edx & X86_FEATURE_PAT) FeatureBits |= KF_PAT;
    if (CpuInfo.Edx & X86_FEATURE_DS) FeatureBits |= KF_DTS;
    if (CpuInfo.Edx & X86_FEATURE_MMX) FeatureBits |= KF_MMX;
    if (CpuInfo.Edx & X86_FEATURE_FXSR) FeatureBits |= KF_FXSR;
    if (CpuInfo.Edx & X86_FEATURE_SSE) FeatureBits |= KF_XMMI;
    if (CpuInfo.Edx & X86_FEATURE_SSE2) FeatureBits |= KF_XMMI64;

    if (CpuInfo.Ecx & X86_FEATURE_SSE3) FeatureBits |= KF_SSE3;
    //if (CpuInfo.Ecx & X86_FEATURE_MONITOR) FeatureBits |= KF_MONITOR;
    //if (CpuInfo.Ecx & X86_FEATURE_SSSE3) FeatureBits |= KF_SSE3SUP;
    if (CpuInfo.Ecx & X86_FEATURE_CX16) FeatureBits |= KF_CMPXCHG16B;
    //if (CpuInfo.Ecx & X86_FEATURE_SSE41) FeatureBits |= KF_SSE41;
    //if (CpuInfo.Ecx & X86_FEATURE_POPCNT) FeatureBits |= KF_POPCNT;
    if (CpuInfo.Ecx & X86_FEATURE_XSAVE) FeatureBits |= KF_XSTATE;

    /* Check if the CPU has hyper-threading */
    if (CpuInfo.Edx & X86_FEATURE_HT)
    {
        /* Set the number of logical CPUs (bits 16-23 of EBX on leaf 1) */
        Prcb->LogicalProcessorsPerPhysicalProcessor = (UCHAR)(CpuInfo.Ebx >> 16);
        if (Prcb->LogicalProcessorsPerPhysicalProcessor > 1)
        {
            /* We're on dual-core */
            KiSMTProcessorsPresent = TRUE;
        }
    }
    else
    {
        /* We only have a single CPU */
        Prcb->LogicalProcessorsPerPhysicalProcessor = 1;
    }

    /* Check extended cpuid features: leaf 0x80000000 returns the highest
       supported extended leaf in EAX when the extended range exists */
    KiCpuId(&CpuInfo, 0x80000000);
    if ((CpuInfo.Eax & 0xffffff00) == 0x80000000)
    {
        /* Check if CPUID 0x80000001 is supported */
        if (CpuInfo.Eax >= 0x80000001)
        {
            /* Check which extended features are available. */
            KiCpuId(&CpuInfo, 0x80000001);

            /* Check if NX-bit is supported */
            if (CpuInfo.Edx & X86_FEATURE_NX) FeatureBits |= KF_NX_BIT;

            /* Now handle each features for each CPU Vendor */
            switch (Vendor)
            {
                case CPU_AMD:
                    /* Bit 31 of extended EDX is the 3DNow! flag on AMD */
                    if (CpuInfo.Edx & 0x80000000) FeatureBits |= KF_3DNOW;
                    break;
            }
        }
    }

    /* Return the Feature Bits */
    return FeatureBits;
}

/*
 * Determines the second-level (L2) cache size of the current CPU and stores
 * it (in bytes) in the PCR's SecondLevelCacheSize field. Uses CPUID leaf 2
 * cache descriptors on Intel and CPUID leaf 0x80000006 on AMD; leaves the
 * size at 0 for other/unknown vendors.
 */
CODE_SEG("INIT")
VOID
NTAPI
KiGetCacheInformation(VOID)
{
    PKIPCR Pcr = (PKIPCR)KeGetPcr();
    ULONG Vendor;
    ULONG CacheRequests = 0, i;
    ULONG CurrentRegister;
    UCHAR RegisterByte;
    BOOLEAN FirstPass = TRUE;
    CPU_INFO CpuInfo;

    /* Set default L2 size */
    Pcr->SecondLevelCacheSize = 0;

    /* Get the Vendor ID and make sure we support CPUID */
    Vendor = KiGetCpuVendor();
    if (!Vendor) return;

    /* Check the Vendor ID */
    switch (Vendor)
    {
        /* Handle Intel case */
        case CPU_INTEL:

            /*Check if we support CPUID 2 */
            KiCpuId(&CpuInfo, 0);
            if (CpuInfo.Eax >= 2)
            {
                /* We need to loop for the number of times CPUID will tell us to */
                do
                {
                    /* Do the CPUID call */
                    KiCpuId(&CpuInfo, 2);

                    /* Check if it was the first call */
                    if (FirstPass)
                    {
                        /*
                         * The number of times to loop is the first byte. Read
                         * it and then destroy it so we don't get confused.
                         */
                        CacheRequests = CpuInfo.Eax & 0xFF;
                        CpuInfo.Eax &= 0xFFFFFF00;

                        /* Don't go over this again */
                        FirstPass = FALSE;
                    }

                    /* Loop all 4 registers (EAX, EBX, ECX, EDX) */
                    for (i = 0; i < 4; i++)
                    {
                        /* Get the current register */
                        CurrentRegister = CpuInfo.AsUINT32[i];

                        /*
                         * If the upper bit is set, then this register should
                         * be skipped.
                         */
                        if (CurrentRegister & 0x80000000) continue;

                        /* Keep looping for every byte inside this register */
                        while (CurrentRegister)
                        {
                            /* Read a byte, skip a byte. */
                            RegisterByte = (UCHAR)(CurrentRegister & 0xFF);
                            CurrentRegister >>= 8;
                            if (!RegisterByte) continue;

                            /*
                             * Valid values are from 0x40 (0 bytes) to 0x49
                             * (32MB), or from 0x80 to 0x89 (same size but
                             * 8-way associative.
                             * (0x40/0x80 itself means "no L2 cache", hence
                             * the strict > comparison below.)
                             */
                            if (((RegisterByte > 0x40) &&
                                 (RegisterByte <= 0x49)) ||
                                ((RegisterByte > 0x80) &&
                                 (RegisterByte <= 0x89)))
                            {
                                /* Mask out only the first nibble */
                                RegisterByte &= 0x0F;

                                /* Set the L2 Cache Size:
                                   descriptor n => 64KB << n bytes */
                                Pcr->SecondLevelCacheSize = 0x10000 <<
                                                            RegisterByte;
                            }
                        }
                    }
                } while (--CacheRequests);
            }
            break;

        case CPU_AMD:

            /* Check if we support CPUID 0x80000006 */
            KiCpuId(&CpuInfo, 0x80000000);
            if (CpuInfo.Eax >= 6)
            {
                /* Get 2nd level cache and tlb size */
                KiCpuId(&CpuInfo, 0x80000006);

                /* Set the L2 Cache Size.
                   ECX bits 16-31 hold the L2 size in KB, so
                   (ECX & 0xFFFF0000) >> 6 == (ECX >> 16) * 1024 bytes. */
                Pcr->SecondLevelCacheSize = (CpuInfo.Ecx & 0xFFFF0000) >> 6;
            }
            break;
    }
}

/*
 * Flushes the current processor's entire TLB (non-global entries) by
 * reloading CR3.
 */
VOID
NTAPI
KeFlushCurrentTb(VOID)
{
    /* Flush the TLB by resetting CR3 */
    __writecr3(__readcr3());
}

/*
 * Restores the processor control state (control registers, debug registers,
 * GDT/IDT and syscall-related MSRs) previously captured by
 * KiSaveProcessorControlState. LDT, TR, MXCSR and the debug/branch MSRs are
 * not yet restored (see commented-out lines).
 */
VOID
NTAPI
KiRestoreProcessorControlState(PKPROCESSOR_STATE ProcessorState)
{
    /* Restore the CR registers */
    __writecr0(ProcessorState->SpecialRegisters.Cr0);
//    __writecr2(ProcessorState->SpecialRegisters.Cr2);
    __writecr3(ProcessorState->SpecialRegisters.Cr3);
    __writecr4(ProcessorState->SpecialRegisters.Cr4);
    __writecr8(ProcessorState->SpecialRegisters.Cr8);

    /* Restore the DR registers */
    __writedr(0, ProcessorState->SpecialRegisters.KernelDr0);
    __writedr(1, ProcessorState->SpecialRegisters.KernelDr1);
    __writedr(2, ProcessorState->SpecialRegisters.KernelDr2);
    __writedr(3, ProcessorState->SpecialRegisters.KernelDr3);
    __writedr(6, ProcessorState->SpecialRegisters.KernelDr6);
    __writedr(7, ProcessorState->SpecialRegisters.KernelDr7);

    /* Restore GDT, IDT, LDT and TSS */
    __lgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
//    __lldt(&ProcessorState->SpecialRegisters.Ldtr);
//    __ltr(&ProcessorState->SpecialRegisters.Tr);
    __lidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

//    __ldmxcsr(&ProcessorState->SpecialRegisters.MxCsr); // FIXME
//    ProcessorState->SpecialRegisters.DebugControl
//    ProcessorState->SpecialRegisters.LastBranchToRip
//    ProcessorState->SpecialRegisters.LastBranchFromRip
//    ProcessorState->SpecialRegisters.LastExceptionToRip
//    ProcessorState->SpecialRegisters.LastExceptionFromRip

    /* Restore MSRs */
    __writemsr(X86_MSR_GSBASE, ProcessorState->SpecialRegisters.MsrGsBase);
    __writemsr(X86_MSR_KERNEL_GSBASE, ProcessorState->SpecialRegisters.MsrGsSwap);
    __writemsr(X86_MSR_STAR, ProcessorState->SpecialRegisters.MsrStar);
    __writemsr(X86_MSR_LSTAR, ProcessorState->SpecialRegisters.MsrLStar);
    __writemsr(X86_MSR_CSTAR, ProcessorState->SpecialRegisters.MsrCStar);
    __writemsr(X86_MSR_SFMASK, ProcessorState->SpecialRegisters.MsrSyscallMask);

}

/*
 * Captures the processor control state — control registers, debug registers,
 * descriptor-table registers (GDT/IDT/LDT/TR) and syscall-related MSRs —
 * into the given KPROCESSOR_STATE. MXCSR and the debug/branch MSRs are not
 * yet saved (see commented-out lines).
 */
VOID
NTAPI
KiSaveProcessorControlState(OUT PKPROCESSOR_STATE ProcessorState)
{
    /* Save the CR registers */
    ProcessorState->SpecialRegisters.Cr0 = __readcr0();
    ProcessorState->SpecialRegisters.Cr2 = __readcr2();
    ProcessorState->SpecialRegisters.Cr3 = __readcr3();
    ProcessorState->SpecialRegisters.Cr4 = __readcr4();
    ProcessorState->SpecialRegisters.Cr8 = __readcr8();

    /* Save the DR registers */
    ProcessorState->SpecialRegisters.KernelDr0 = __readdr(0);
    ProcessorState->SpecialRegisters.KernelDr1 = __readdr(1);
    ProcessorState->SpecialRegisters.KernelDr2 = __readdr(2);
    ProcessorState->SpecialRegisters.KernelDr3 = __readdr(3);
    ProcessorState->SpecialRegisters.KernelDr6 = __readdr(6);
    ProcessorState->SpecialRegisters.KernelDr7 = __readdr(7);

    /* Save GDT, IDT, LDT and TSS */
    __sgdt(&ProcessorState->SpecialRegisters.Gdtr.Limit);
    __sldt(&ProcessorState->SpecialRegisters.Ldtr);
    __str(&ProcessorState->SpecialRegisters.Tr);
    __sidt(&ProcessorState->SpecialRegisters.Idtr.Limit);

//    __stmxcsr(&ProcessorState->SpecialRegisters.MxCsr);
//    ProcessorState->SpecialRegisters.DebugControl =
//    ProcessorState->SpecialRegisters.LastBranchToRip =
//    ProcessorState->SpecialRegisters.LastBranchFromRip =
//    ProcessorState->SpecialRegisters.LastExceptionToRip =
//    ProcessorState->SpecialRegisters.LastExceptionFromRip =

    /* Save MSRs */
    ProcessorState->SpecialRegisters.MsrGsBase = __readmsr(X86_MSR_GSBASE);
    ProcessorState->SpecialRegisters.MsrGsSwap = __readmsr(X86_MSR_KERNEL_GSBASE);
    ProcessorState->SpecialRegisters.MsrStar = __readmsr(X86_MSR_STAR);
    ProcessorState->SpecialRegisters.MsrLStar = __readmsr(X86_MSR_LSTAR);
    ProcessorState->SpecialRegisters.MsrCStar = __readmsr(X86_MSR_CSTAR);
    ProcessorState->SpecialRegisters.MsrSyscallMask = __readmsr(X86_MSR_SFMASK);
}

/*
 * Flushes the entire TB. NOTE(review): only flushes the CURRENT processor's
 * TB regardless of the AllProcessors argument (see FIXME); the Invalid and
 * AllProcessors parameters are currently ignored.
 */
VOID
NTAPI
KeFlushEntireTb(IN BOOLEAN Invalid,
                IN BOOLEAN AllProcessors)
{
    KIRQL OldIrql;

    // FIXME: halfplemented
    /* Raise the IRQL for the TB Flush */
    OldIrql = KeRaiseIrqlToSynchLevel();

    /* Flush the TB for the Current CPU, and update the flush stamp */
    KeFlushCurrentTb();

    /* Update the flush stamp and return to original IRQL */
    InterlockedExchangeAdd(&KiTbFlushTimeStamp, 1);
    KeLowerIrql(OldIrql);

}

/*
 * Returns the affinity mask of active processors in the system.
 */
KAFFINITY
NTAPI
KeQueryActiveProcessors(VOID)
{
    PAGED_CODE();

    /* Simply return the number of active processors */
    return KeActiveProcessors;
}

/*
 * Saves the floating-point state. On amd64 this is a no-op (FP state is
 * saved with the thread context), so it always succeeds.
 */
NTSTATUS
NTAPI
KxSaveFloatingPointState(OUT PKFLOATING_SAVE FloatingState)
{
    UNREFERENCED_PARAMETER(FloatingState);
    return STATUS_SUCCESS;
}

/*
 * Restores the floating-point state. No-op counterpart of
 * KxSaveFloatingPointState; always succeeds.
 */
NTSTATUS
NTAPI
KxRestoreFloatingPointState(IN PKFLOATING_SAVE FloatingState)
{
    UNREFERENCED_PARAMETER(FloatingState);
    return STATUS_SUCCESS;
}

/*
 * Invalidates (writes back and flushes) all processor caches via WBINVD.
 * Always returns TRUE.
 */
BOOLEAN
NTAPI
KeInvalidateAllCaches(VOID)
{
    /* Invalidate all caches */
    __wbinvd();
    return TRUE;
}

/*
 * @implemented
 */
ULONG
NTAPI
KeGetRecommendedSharedDataAlignment(VOID)
{
    /* Return the global variable */
    return KeLargestCacheLine;
}

/*
 * @implemented
 */
VOID
__cdecl
KeSaveStateForHibernate(IN PKPROCESSOR_STATE State)
{
    /* Capture the context */
    RtlCaptureContext(&State->ContextFrame);

    /* Capture the control state */
    KiSaveProcessorControlState(State);
}

/*
 * @implemented
 */
VOID
NTAPI
KeSetDmaIoCoherency(IN ULONG Coherency)
{
    /* Save the coherency globally */
    KiDmaIoCoherency = Coherency;
}