1 /*- 2 * Copyright (c) KATO Takenori, 1997, 1998. 3 * 4 * All rights reserved. Unpublished rights reserved under the copyright 5 * laws of Japan. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer as 13 * the first lines of this file unmodified. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 #include "opt_cpu.h" 34 35 #include <sys/param.h> 36 #include <sys/kernel.h> 37 #include <sys/systm.h> 38 #include <sys/sysctl.h> 39 40 #include <machine/cputypes.h> 41 #include <machine/md_var.h> 42 #include <machine/specialreg.h> 43 44 #include <vm/vm.h> 45 #include <vm/pmap.h> 46 47 #if !defined(CPU_DISABLE_SSE) && defined(I686_CPU) 48 #define CPU_ENABLE_SSE 49 #endif 50 51 void initializecpu(void); 52 #if defined(I586_CPU) && defined(CPU_WT_ALLOC) 53 void enable_K5_wt_alloc(void); 54 void enable_K6_wt_alloc(void); 55 void enable_K6_2_wt_alloc(void); 56 #endif 57 58 #ifdef I486_CPU 59 static void init_5x86(void); 60 static void init_bluelightning(void); 61 static void init_486dlc(void); 62 static void init_cy486dx(void); 63 #ifdef CPU_I486_ON_386 64 static void init_i486_on_386(void); 65 #endif 66 static void init_6x86(void); 67 #endif /* I486_CPU */ 68 69 #ifdef I686_CPU 70 static void init_6x86MX(void); 71 static void init_ppro(void); 72 static void init_mendocino(void); 73 #endif 74 75 static int hw_instruction_sse; 76 SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD, 77 &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU"); 78 79 /* Must *NOT* be BSS or locore will bzero these after setting them */ 80 int cpu = 0; /* Are we 386, 386sx, 486, etc? */ 81 u_int cpu_feature = 0; /* Feature flags */ 82 u_int cpu_feature2 = 0; /* Feature flags */ 83 u_int amd_feature = 0; /* AMD feature flags */ 84 u_int amd_feature2 = 0; /* AMD feature flags */ 85 u_int amd_pminfo = 0; /* AMD advanced power management info */ 86 u_int via_feature_rng = 0; /* VIA RNG features */ 87 u_int via_feature_xcrypt = 0; /* VIA ACE features */ 88 u_int cpu_high = 0; /* Highest arg to CPUID */ 89 u_int cpu_id = 0; /* Stepping ID */ 90 u_int cpu_procinfo = 0; /* HyperThreading Info / Brand Index / CLFUSH */ 91 u_int cpu_procinfo2 = 0; /* Multicore info */ 92 char cpu_vendor[20] = ""; /* CPU Origin code */ 93 u_int cpu_vendor_id = 0; /* CPU vendor ID */ 94 95 SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD, 96 &via_feature_rng, 0, "VIA C3/C7 RNG feature available in CPU"); 97 SYSCTL_UINT(_hw, OID_AUTO, via_feature_xcrypt, CTLFLAG_RD, 98 &via_feature_xcrypt, 0, "VIA C3/C7 xcrypt feature available in CPU"); 99 100 #ifdef CPU_ENABLE_SSE 101 u_int cpu_fxsr; /* SSE enabled */ 102 u_int cpu_mxcsr_mask; /* valid bits in mxcsr */ 103 #endif 104 105 #ifdef I486_CPU 106 /* 107 * IBM Blue Lightning 108 */ 109 static void 110 init_bluelightning(void) 111 { 112 u_long eflags; 113 114 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE) 115 need_post_dma_flush = 1; 116 #endif 117 118 eflags = read_eflags(); 119 disable_intr(); 120 121 load_cr0(rcr0() | CR0_CD | CR0_NW); 122 invd(); 123 124 #ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE 125 wrmsr(0x1000, 0x9c92LL); /* FP operand can be cacheable on Cyrix FPU */ 126 #else 127 wrmsr(0x1000, 0x1c92LL); /* Intel FPU */ 128 #endif 129 /* Enables 13MB and 0-640KB cache. */ 130 wrmsr(0x1001, (0xd0LL << 32) | 0x3ff); 131 #ifdef CPU_BLUELIGHTNING_3X 132 wrmsr(0x1002, 0x04000000LL); /* Enables triple-clock mode. */ 133 #else 134 wrmsr(0x1002, 0x03000000LL); /* Enables double-clock mode. */ 135 #endif 136 137 /* Enable caching in CR0. */ 138 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0 and NW = 0 */ 139 invd(); 140 write_eflags(eflags); 141 } 142 143 /* 144 * Cyrix 486SLC/DLC/SR/DR series 145 */ 146 static void 147 init_486dlc(void) 148 { 149 u_long eflags; 150 u_char ccr0; 151 152 eflags = read_eflags(); 153 disable_intr(); 154 invd(); 155 156 ccr0 = read_cyrix_reg(CCR0); 157 #ifndef CYRIX_CACHE_WORKS 158 ccr0 |= CCR0_NC1 | CCR0_BARB; 159 write_cyrix_reg(CCR0, ccr0); 160 invd(); 161 #else 162 ccr0 &= ~CCR0_NC0; 163 #ifndef CYRIX_CACHE_REALLY_WORKS 164 ccr0 |= CCR0_NC1 | CCR0_BARB; 165 #else 166 ccr0 |= CCR0_NC1; 167 #endif 168 #ifdef CPU_DIRECT_MAPPED_CACHE 169 ccr0 |= CCR0_CO; /* Direct mapped mode. */ 170 #endif 171 write_cyrix_reg(CCR0, ccr0); 172 173 /* Clear non-cacheable region. */ 174 write_cyrix_reg(NCR1+2, NCR_SIZE_0K); 175 write_cyrix_reg(NCR2+2, NCR_SIZE_0K); 176 write_cyrix_reg(NCR3+2, NCR_SIZE_0K); 177 write_cyrix_reg(NCR4+2, NCR_SIZE_0K); 178 179 write_cyrix_reg(0, 0); /* dummy write */ 180 181 /* Enable caching in CR0. */ 182 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0 and NW = 0 */ 183 invd(); 184 #endif /* !CYRIX_CACHE_WORKS */ 185 write_eflags(eflags); 186 } 187 188 189 /* 190 * Cyrix 486S/DX series 191 */ 192 static void 193 init_cy486dx(void) 194 { 195 u_long eflags; 196 u_char ccr2; 197 198 eflags = read_eflags(); 199 disable_intr(); 200 invd(); 201 202 ccr2 = read_cyrix_reg(CCR2); 203 #ifdef CPU_SUSP_HLT 204 ccr2 |= CCR2_SUSP_HLT; 205 #endif 206 207 #ifdef PC98 208 /* Enables WB cache interface pin and Lock NW bit in CR0. */ 209 ccr2 |= CCR2_WB | CCR2_LOCK_NW; 210 /* Unlock NW bit in CR0. */ 211 write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW); 212 load_cr0((rcr0() & ~CR0_CD) | CR0_NW); /* CD = 0, NW = 1 */ 213 #endif 214 215 write_cyrix_reg(CCR2, ccr2); 216 write_eflags(eflags); 217 } 218 219 220 /* 221 * Cyrix 5x86 222 */ 223 static void 224 init_5x86(void) 225 { 226 u_long eflags; 227 u_char ccr2, ccr3, ccr4, pcr0; 228 229 eflags = read_eflags(); 230 disable_intr(); 231 232 load_cr0(rcr0() | CR0_CD | CR0_NW); 233 wbinvd(); 234 235 (void)read_cyrix_reg(CCR3); /* dummy */ 236 237 /* Initialize CCR2. */ 238 ccr2 = read_cyrix_reg(CCR2); 239 ccr2 |= CCR2_WB; 240 #ifdef CPU_SUSP_HLT 241 ccr2 |= CCR2_SUSP_HLT; 242 #else 243 ccr2 &= ~CCR2_SUSP_HLT; 244 #endif 245 ccr2 |= CCR2_WT1; 246 write_cyrix_reg(CCR2, ccr2); 247 248 /* Initialize CCR4. */ 249 ccr3 = read_cyrix_reg(CCR3); 250 write_cyrix_reg(CCR3, CCR3_MAPEN0); 251 252 ccr4 = read_cyrix_reg(CCR4); 253 ccr4 |= CCR4_DTE; 254 ccr4 |= CCR4_MEM; 255 #ifdef CPU_FASTER_5X86_FPU 256 ccr4 |= CCR4_FASTFPE; 257 #else 258 ccr4 &= ~CCR4_FASTFPE; 259 #endif 260 ccr4 &= ~CCR4_IOMASK; 261 /******************************************************************** 262 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time 263 * should be 0 for errata fix. 264 ********************************************************************/ 265 #ifdef CPU_IORT 266 ccr4 |= CPU_IORT & CCR4_IOMASK; 267 #endif 268 write_cyrix_reg(CCR4, ccr4); 269 270 /* Initialize PCR0. */ 271 /**************************************************************** 272 * WARNING: RSTK_EN and LOOP_EN could make your system unstable. 273 * BTB_EN might make your system unstable. 274 ****************************************************************/ 275 pcr0 = read_cyrix_reg(PCR0); 276 #ifdef CPU_RSTK_EN 277 pcr0 |= PCR0_RSTK; 278 #else 279 pcr0 &= ~PCR0_RSTK; 280 #endif 281 #ifdef CPU_BTB_EN 282 pcr0 |= PCR0_BTB; 283 #else 284 pcr0 &= ~PCR0_BTB; 285 #endif 286 #ifdef CPU_LOOP_EN 287 pcr0 |= PCR0_LOOP; 288 #else 289 pcr0 &= ~PCR0_LOOP; 290 #endif 291 292 /**************************************************************** 293 * WARNING: if you use a memory mapped I/O device, don't use 294 * DISABLE_5X86_LSSER option, which may reorder memory mapped 295 * I/O access. 296 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER. 297 ****************************************************************/ 298 #ifdef CPU_DISABLE_5X86_LSSER 299 pcr0 &= ~PCR0_LSSER; 300 #else 301 pcr0 |= PCR0_LSSER; 302 #endif 303 write_cyrix_reg(PCR0, pcr0); 304 305 /* Restore CCR3. */ 306 write_cyrix_reg(CCR3, ccr3); 307 308 (void)read_cyrix_reg(0x80); /* dummy */ 309 310 /* Unlock NW bit in CR0. */ 311 write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW); 312 load_cr0((rcr0() & ~CR0_CD) | CR0_NW); /* CD = 0, NW = 1 */ 313 /* Lock NW bit in CR0. */ 314 write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW); 315 316 write_eflags(eflags); 317 } 318 319 #ifdef CPU_I486_ON_386 320 /* 321 * There are i486 based upgrade products for i386 machines. 322 * In this case, BIOS doesn't enables CPU cache. 323 */ 324 static void 325 init_i486_on_386(void) 326 { 327 u_long eflags; 328 329 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE) 330 need_post_dma_flush = 1; 331 #endif 332 333 eflags = read_eflags(); 334 disable_intr(); 335 336 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0, NW = 0 */ 337 338 write_eflags(eflags); 339 } 340 #endif 341 342 /* 343 * Cyrix 6x86 344 * 345 * XXX - What should I do here? Please let me know. 346 */ 347 static void 348 init_6x86(void) 349 { 350 u_long eflags; 351 u_char ccr3, ccr4; 352 353 eflags = read_eflags(); 354 disable_intr(); 355 356 load_cr0(rcr0() | CR0_CD | CR0_NW); 357 wbinvd(); 358 359 /* Initialize CCR0. */ 360 write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1); 361 362 /* Initialize CCR1. */ 363 #ifdef CPU_CYRIX_NO_LOCK 364 write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK); 365 #else 366 write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK); 367 #endif 368 369 /* Initialize CCR2. */ 370 #ifdef CPU_SUSP_HLT 371 write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT); 372 #else 373 write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT); 374 #endif 375 376 ccr3 = read_cyrix_reg(CCR3); 377 write_cyrix_reg(CCR3, CCR3_MAPEN0); 378 379 /* Initialize CCR4. */ 380 ccr4 = read_cyrix_reg(CCR4); 381 ccr4 |= CCR4_DTE; 382 ccr4 &= ~CCR4_IOMASK; 383 #ifdef CPU_IORT 384 write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK)); 385 #else 386 write_cyrix_reg(CCR4, ccr4 | 7); 387 #endif 388 389 /* Initialize CCR5. */ 390 #ifdef CPU_WT_ALLOC 391 write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC); 392 #endif 393 394 /* Restore CCR3. */ 395 write_cyrix_reg(CCR3, ccr3); 396 397 /* Unlock NW bit in CR0. */ 398 write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW); 399 400 /* 401 * Earlier revision of the 6x86 CPU could crash the system if 402 * L1 cache is in write-back mode. 403 */ 404 if ((cyrix_did & 0xff00) > 0x1600) 405 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0 and NW = 0 */ 406 else { 407 /* Revision 2.6 and lower. */ 408 #ifdef CYRIX_CACHE_REALLY_WORKS 409 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0 and NW = 0 */ 410 #else 411 load_cr0((rcr0() & ~CR0_CD) | CR0_NW); /* CD = 0 and NW = 1 */ 412 #endif 413 } 414 415 /* Lock NW bit in CR0. */ 416 write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW); 417 418 write_eflags(eflags); 419 } 420 #endif /* I486_CPU */ 421 422 #ifdef I686_CPU 423 /* 424 * Cyrix 6x86MX (code-named M2) 425 * 426 * XXX - What should I do here? Please let me know. 427 */ 428 static void 429 init_6x86MX(void) 430 { 431 u_long eflags; 432 u_char ccr3, ccr4; 433 434 eflags = read_eflags(); 435 disable_intr(); 436 437 load_cr0(rcr0() | CR0_CD | CR0_NW); 438 wbinvd(); 439 440 /* Initialize CCR0. */ 441 write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1); 442 443 /* Initialize CCR1. */ 444 #ifdef CPU_CYRIX_NO_LOCK 445 write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK); 446 #else 447 write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK); 448 #endif 449 450 /* Initialize CCR2. */ 451 #ifdef CPU_SUSP_HLT 452 write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT); 453 #else 454 write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT); 455 #endif 456 457 ccr3 = read_cyrix_reg(CCR3); 458 write_cyrix_reg(CCR3, CCR3_MAPEN0); 459 460 /* Initialize CCR4. */ 461 ccr4 = read_cyrix_reg(CCR4); 462 ccr4 &= ~CCR4_IOMASK; 463 #ifdef CPU_IORT 464 write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK)); 465 #else 466 write_cyrix_reg(CCR4, ccr4 | 7); 467 #endif 468 469 /* Initialize CCR5. */ 470 #ifdef CPU_WT_ALLOC 471 write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC); 472 #endif 473 474 /* Restore CCR3. */ 475 write_cyrix_reg(CCR3, ccr3); 476 477 /* Unlock NW bit in CR0. */ 478 write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW); 479 480 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0 and NW = 0 */ 481 482 /* Lock NW bit in CR0. */ 483 write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW); 484 485 write_eflags(eflags); 486 } 487 488 static void 489 init_ppro(void) 490 { 491 u_int64_t apicbase; 492 493 /* 494 * Local APIC should be disabled if it is not going to be used. 495 */ 496 apicbase = rdmsr(MSR_APICBASE); 497 apicbase &= ~APICBASE_ENABLED; 498 wrmsr(MSR_APICBASE, apicbase); 499 } 500 501 /* 502 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the 503 * L2 cache). 504 */ 505 static void 506 init_mendocino(void) 507 { 508 #ifdef CPU_PPRO2CELERON 509 u_long eflags; 510 u_int64_t bbl_cr_ctl3; 511 512 eflags = read_eflags(); 513 disable_intr(); 514 515 load_cr0(rcr0() | CR0_CD | CR0_NW); 516 wbinvd(); 517 518 bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3); 519 520 /* If the L2 cache is configured, do nothing. */ 521 if (!(bbl_cr_ctl3 & 1)) { 522 bbl_cr_ctl3 = 0x134052bLL; 523 524 /* Set L2 Cache Latency (Default: 5). */ 525 #ifdef CPU_CELERON_L2_LATENCY 526 #if CPU_L2_LATENCY > 15 527 #error invalid CPU_L2_LATENCY. 528 #endif 529 bbl_cr_ctl3 |= CPU_L2_LATENCY << 1; 530 #else 531 bbl_cr_ctl3 |= 5 << 1; 532 #endif 533 wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3); 534 } 535 536 load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); 537 write_eflags(eflags); 538 #endif /* CPU_PPRO2CELERON */ 539 } 540 541 /* 542 * Initialize special VIA C3/C7 features 543 */ 544 static void 545 init_via(void) 546 { 547 u_int regs[4], val; 548 u_int64_t msreg; 549 550 do_cpuid(0xc0000000, regs); 551 val = regs[0]; 552 if (val >= 0xc0000001) { 553 do_cpuid(0xc0000001, regs); 554 val = regs[3]; 555 } else 556 val = 0; 557 558 /* Enable RNG if present and disabled */ 559 if (val & VIA_CPUID_HAS_RNG) { 560 if (!(val & VIA_CPUID_DO_RNG)) { 561 msreg = rdmsr(0x110B); 562 msreg |= 0x40; 563 wrmsr(0x110B, msreg); 564 } 565 via_feature_rng = VIA_HAS_RNG; 566 } 567 /* Enable AES engine if present and disabled */ 568 if (val & VIA_CPUID_HAS_ACE) { 569 if (!(val & VIA_CPUID_DO_ACE)) { 570 msreg = rdmsr(0x1107); 571 msreg |= (0x01 << 28); 572 wrmsr(0x1107, msreg); 573 } 574 via_feature_xcrypt |= VIA_HAS_AES; 575 } 576 /* Enable ACE2 engine if present and disabled */ 577 if (val & VIA_CPUID_HAS_ACE2) { 578 if (!(val & VIA_CPUID_DO_ACE2)) { 579 msreg = rdmsr(0x1107); 580 msreg |= (0x01 << 28); 581 wrmsr(0x1107, msreg); 582 } 583 via_feature_xcrypt |= VIA_HAS_AESCTR; 584 } 585 /* Enable SHA engine if present and disabled */ 586 if (val & VIA_CPUID_HAS_PHE) { 587 if (!(val & VIA_CPUID_DO_PHE)) { 588 msreg = rdmsr(0x1107); 589 msreg |= (0x01 << 28/**/); 590 wrmsr(0x1107, msreg); 591 } 592 via_feature_xcrypt |= VIA_HAS_SHA; 593 } 594 /* Enable MM engine if present and disabled */ 595 if (val & VIA_CPUID_HAS_PMM) { 596 if (!(val & VIA_CPUID_DO_PMM)) { 597 msreg = rdmsr(0x1107); 598 msreg |= (0x01 << 28/**/); 599 wrmsr(0x1107, msreg); 600 } 601 via_feature_xcrypt |= VIA_HAS_MM; 602 } 603 } 604 605 #endif /* I686_CPU */ 606 607 /* 608 * Initialize CR4 (Control register 4) to enable SSE instructions. 609 */ 610 void 611 enable_sse(void) 612 { 613 #if defined(CPU_ENABLE_SSE) 614 if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) { 615 load_cr4(rcr4() | CR4_FXSR | CR4_XMM); 616 cpu_fxsr = hw_instruction_sse = 1; 617 } 618 #endif 619 } 620 621 void 622 initializecpu(void) 623 { 624 625 switch (cpu) { 626 #ifdef I486_CPU 627 case CPU_BLUE: 628 init_bluelightning(); 629 break; 630 case CPU_486DLC: 631 init_486dlc(); 632 break; 633 case CPU_CY486DX: 634 init_cy486dx(); 635 break; 636 case CPU_M1SC: 637 init_5x86(); 638 break; 639 #ifdef CPU_I486_ON_386 640 case CPU_486: 641 init_i486_on_386(); 642 break; 643 #endif 644 case CPU_M1: 645 init_6x86(); 646 break; 647 #endif /* I486_CPU */ 648 #ifdef I686_CPU 649 case CPU_M2: 650 init_6x86MX(); 651 break; 652 case CPU_686: 653 if (cpu_vendor_id == CPU_VENDOR_INTEL) { 654 switch (cpu_id & 0xff0) { 655 case 0x610: 656 init_ppro(); 657 break; 658 case 0x660: 659 init_mendocino(); 660 break; 661 } 662 } else if (cpu_vendor_id == CPU_VENDOR_AMD) { 663 #if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK) 664 /* 665 * Sometimes the BIOS doesn't enable SSE instructions. 666 * According to AMD document 20734, the mobile 667 * Duron, the (mobile) Athlon 4 and the Athlon MP 668 * support SSE. These correspond to cpu_id 0x66X 669 * or 0x67X. 670 */ 671 if ((cpu_feature & CPUID_XMM) == 0 && 672 ((cpu_id & ~0xf) == 0x660 || 673 (cpu_id & ~0xf) == 0x670 || 674 (cpu_id & ~0xf) == 0x680)) { 675 u_int regs[4]; 676 wrmsr(0xC0010015, rdmsr(0xC0010015) & ~0x08000); 677 do_cpuid(1, regs); 678 cpu_feature = regs[3]; 679 } 680 #endif 681 } else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) { 682 switch (cpu_id & 0xff0) { 683 case 0x690: 684 if ((cpu_id & 0xf) < 3) 685 break; 686 /* fall through. */ 687 case 0x6a0: 688 case 0x6d0: 689 case 0x6f0: 690 init_via(); 691 break; 692 default: 693 break; 694 } 695 } 696 #ifdef PAE 697 if ((amd_feature & AMDID_NX) != 0) { 698 uint64_t msr; 699 700 msr = rdmsr(MSR_EFER) | EFER_NXE; 701 wrmsr(MSR_EFER, msr); 702 pg_nx = PG_NX; 703 } 704 #endif 705 break; 706 #endif 707 default: 708 break; 709 } 710 enable_sse(); 711 712 #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE) 713 /* 714 * OS should flush L1 cache by itself because no PC-98 supports 715 * non-Intel CPUs. Use wbinvd instruction before DMA transfer 716 * when need_pre_dma_flush = 1, use invd instruction after DMA 717 * transfer when need_post_dma_flush = 1. If your CPU upgrade 718 * product supports hardware cache control, you can add the 719 * CPU_UPGRADE_HW_CACHE option in your kernel configuration file. 720 * This option eliminates unneeded cache flush instruction(s). 721 */ 722 if (cpu_vendor_id == CPU_VENDOR_CYRIX) { 723 switch (cpu) { 724 #ifdef I486_CPU 725 case CPU_486DLC: 726 need_post_dma_flush = 1; 727 break; 728 case CPU_M1SC: 729 need_pre_dma_flush = 1; 730 break; 731 case CPU_CY486DX: 732 need_pre_dma_flush = 1; 733 #ifdef CPU_I486_ON_386 734 need_post_dma_flush = 1; 735 #endif 736 break; 737 #endif 738 default: 739 break; 740 } 741 } else if (cpu_vendor_id == CPU_VENDOR_AMD) { 742 switch (cpu_id & 0xFF0) { 743 case 0x470: /* Enhanced Am486DX2 WB */ 744 case 0x490: /* Enhanced Am486DX4 WB */ 745 case 0x4F0: /* Am5x86 WB */ 746 need_pre_dma_flush = 1; 747 break; 748 } 749 } else if (cpu_vendor_id == CPU_VENDOR_IBM) { 750 need_post_dma_flush = 1; 751 } else { 752 #ifdef CPU_I486_ON_386 753 need_pre_dma_flush = 1; 754 #endif 755 } 756 #endif /* PC98 && !CPU_UPGRADE_HW_CACHE */ 757 } 758 759 #if defined(I586_CPU) && defined(CPU_WT_ALLOC) 760 /* 761 * Enable write allocate feature of AMD processors. 762 * Following two functions require the Maxmem variable being set. 763 */ 764 void 765 enable_K5_wt_alloc(void) 766 { 767 u_int64_t msr; 768 register_t savecrit; 769 770 /* 771 * Write allocate is supported only on models 1, 2, and 3, with 772 * a stepping of 4 or greater. 773 */ 774 if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) { 775 savecrit = intr_disable(); 776 msr = rdmsr(0x83); /* HWCR */ 777 wrmsr(0x83, msr & !(0x10)); 778 779 /* 780 * We have to tell the chip where the top of memory is, 781 * since video cards could have frame bufferes there, 782 * memory-mapped I/O could be there, etc. 783 */ 784 if(Maxmem > 0) 785 msr = Maxmem / 16; 786 else 787 msr = 0; 788 msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE; 789 #ifdef PC98 790 if (!(inb(0x43b) & 4)) { 791 wrmsr(0x86, 0x0ff00f0); 792 msr |= AMD_WT_ALLOC_PRE; 793 } 794 #else 795 /* 796 * There is no way to know wheter 15-16M hole exists or not. 797 * Therefore, we disable write allocate for this range. 798 */ 799 wrmsr(0x86, 0x0ff00f0); 800 msr |= AMD_WT_ALLOC_PRE; 801 #endif 802 wrmsr(0x85, msr); 803 804 msr=rdmsr(0x83); 805 wrmsr(0x83, msr|0x10); /* enable write allocate */ 806 intr_restore(savecrit); 807 } 808 } 809 810 void 811 enable_K6_wt_alloc(void) 812 { 813 quad_t size; 814 u_int64_t whcr; 815 u_long eflags; 816 817 eflags = read_eflags(); 818 disable_intr(); 819 wbinvd(); 820 821 #ifdef CPU_DISABLE_CACHE 822 /* 823 * Certain K6-2 box becomes unstable when write allocation is 824 * enabled. 825 */ 826 /* 827 * The AMD-K6 processer provides the 64-bit Test Register 12(TR12), 828 * but only the Cache Inhibit(CI) (bit 3 of TR12) is suppported. 829 * All other bits in TR12 have no effect on the processer's operation. 830 * The I/O Trap Restart function (bit 9 of TR12) is always enabled 831 * on the AMD-K6. 832 */ 833 wrmsr(0x0000000e, (u_int64_t)0x0008); 834 #endif 835 /* Don't assume that memory size is aligned with 4M. */ 836 if (Maxmem > 0) 837 size = ((Maxmem >> 8) + 3) >> 2; 838 else 839 size = 0; 840 841 /* Limit is 508M bytes. */ 842 if (size > 0x7f) 843 size = 0x7f; 844 whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1); 845 846 #if defined(PC98) || defined(NO_MEMORY_HOLE) 847 if (whcr & (0x7fLL << 1)) { 848 #ifdef PC98 849 /* 850 * If bit 2 of port 0x43b is 0, disable wrte allocate for the 851 * 15-16M range. 852 */ 853 if (!(inb(0x43b) & 4)) 854 whcr &= ~0x0001LL; 855 else 856 #endif 857 whcr |= 0x0001LL; 858 } 859 #else 860 /* 861 * There is no way to know wheter 15-16M hole exists or not. 862 * Therefore, we disable write allocate for this range. 863 */ 864 whcr &= ~0x0001LL; 865 #endif 866 wrmsr(0x0c0000082, whcr); 867 868 write_eflags(eflags); 869 } 870 871 void 872 enable_K6_2_wt_alloc(void) 873 { 874 quad_t size; 875 u_int64_t whcr; 876 u_long eflags; 877 878 eflags = read_eflags(); 879 disable_intr(); 880 wbinvd(); 881 882 #ifdef CPU_DISABLE_CACHE 883 /* 884 * Certain K6-2 box becomes unstable when write allocation is 885 * enabled. 886 */ 887 /* 888 * The AMD-K6 processer provides the 64-bit Test Register 12(TR12), 889 * but only the Cache Inhibit(CI) (bit 3 of TR12) is suppported. 890 * All other bits in TR12 have no effect on the processer's operation. 891 * The I/O Trap Restart function (bit 9 of TR12) is always enabled 892 * on the AMD-K6. 893 */ 894 wrmsr(0x0000000e, (u_int64_t)0x0008); 895 #endif 896 /* Don't assume that memory size is aligned with 4M. */ 897 if (Maxmem > 0) 898 size = ((Maxmem >> 8) + 3) >> 2; 899 else 900 size = 0; 901 902 /* Limit is 4092M bytes. */ 903 if (size > 0x3fff) 904 size = 0x3ff; 905 whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22); 906 907 #if defined(PC98) || defined(NO_MEMORY_HOLE) 908 if (whcr & (0x3ffLL << 22)) { 909 #ifdef PC98 910 /* 911 * If bit 2 of port 0x43b is 0, disable wrte allocate for the 912 * 15-16M range. 913 */ 914 if (!(inb(0x43b) & 4)) 915 whcr &= ~(1LL << 16); 916 else 917 #endif 918 whcr |= 1LL << 16; 919 } 920 #else 921 /* 922 * There is no way to know wheter 15-16M hole exists or not. 923 * Therefore, we disable write allocate for this range. 924 */ 925 whcr &= ~(1LL << 16); 926 #endif 927 wrmsr(0x0c0000082, whcr); 928 929 write_eflags(eflags); 930 } 931 #endif /* I585_CPU && CPU_WT_ALLOC */ 932 933 #include "opt_ddb.h" 934 #ifdef DDB 935 #include <ddb/ddb.h> 936 937 DB_SHOW_COMMAND(cyrixreg, cyrixreg) 938 { 939 u_long eflags; 940 u_int cr0; 941 u_char ccr1, ccr2, ccr3; 942 u_char ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0; 943 944 cr0 = rcr0(); 945 if (cpu_vendor_id == CPU_VENDOR_CYRIX) { 946 eflags = read_eflags(); 947 disable_intr(); 948 949 950 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) { 951 ccr0 = read_cyrix_reg(CCR0); 952 } 953 ccr1 = read_cyrix_reg(CCR1); 954 ccr2 = read_cyrix_reg(CCR2); 955 ccr3 = read_cyrix_reg(CCR3); 956 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) { 957 write_cyrix_reg(CCR3, CCR3_MAPEN0); 958 ccr4 = read_cyrix_reg(CCR4); 959 if ((cpu == CPU_M1) || (cpu == CPU_M2)) 960 ccr5 = read_cyrix_reg(CCR5); 961 else 962 pcr0 = read_cyrix_reg(PCR0); 963 write_cyrix_reg(CCR3, ccr3); /* Restore CCR3. */ 964 } 965 write_eflags(eflags); 966 967 if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) 968 printf("CCR0=%x, ", (u_int)ccr0); 969 970 printf("CCR1=%x, CCR2=%x, CCR3=%x", 971 (u_int)ccr1, (u_int)ccr2, (u_int)ccr3); 972 if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) { 973 printf(", CCR4=%x, ", (u_int)ccr4); 974 if (cpu == CPU_M1SC) 975 printf("PCR0=%x\n", pcr0); 976 else 977 printf("CCR5=%x\n", ccr5); 978 } 979 } 980 printf("CR0=%x\n", cr0); 981 } 982 #endif /* DDB */ 983