xref: /freebsd/sys/i386/i386/initcpu.c (revision 4f52dfbb)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) KATO Takenori, 1997, 1998.
 *
 * All rights reserved.  Unpublished rights reserved under the copyright
 * laws of Japan.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cpu.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#ifdef I486_CPU
static void init_5x86(void);
static void init_bluelightning(void);
static void init_486dlc(void);
static void init_cy486dx(void);
#ifdef CPU_I486_ON_386
static void init_i486_on_386(void);
#endif
static void init_6x86(void);
#endif /* I486_CPU */

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
static void	enable_K5_wt_alloc(void);
static void	enable_K6_wt_alloc(void);
static void	enable_K6_2_wt_alloc(void);
#endif

#ifdef I686_CPU
static void	init_6x86MX(void);
static void	init_ppro(void);
static void	init_mendocino(void);
#endif

static int	hw_instruction_sse;
SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD,
    &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU");
/*
 * -1: automatic (default)
 *  0: keep CLFLUSH enabled
 *  1: force CLFLUSH disabled
 */
static int	hw_clflush_disable = -1;

u_int	cyrix_did;		/* Device ID of Cyrix CPU */

#ifdef I486_CPU
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	register_t saveintr;

	saveintr = intr_disable();

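	/*
	 * Disable caching (CD = 1, NW = 1) and discard the cache contents
	 * before reprogramming the cache-control MSRs below.
	 */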
	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	intr_restore(saveintr);
}

/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
	register_t saveintr;
	u_char	ccr0;

	saveintr = intr_disable();
	invd();

	ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
	write_cyrix_reg(CCR0, ccr0);
	invd();
#else
	ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
	ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
	ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
	ccr0 |= CCR0_CO;			/* Direct mapped mode. */
#endif
	write_cyrix_reg(CCR0, ccr0);

	/* Clear non-cacheable region. */
	write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
	write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

	write_cyrix_reg(0, 0);	/* dummy write */

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
#endif /* !CYRIX_CACHE_WORKS */
	intr_restore(saveintr);
}

/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	register_t saveintr;
	u_char	ccr2;

	saveintr = intr_disable();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

	write_cyrix_reg(CCR2, ccr2);
	intr_restore(saveintr);
}

/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
	register_t saveintr;
	u_char	ccr2, ccr3, ccr4, pcr0;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	(void)read_cyrix_reg(CCR3);		/* dummy */

	/* Initialize CCR2. */
	ccr2 = read_cyrix_reg(CCR2);
	ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#else
	ccr2 &= ~CCR2_SUSP_HLT;
#endif
	ccr2 |= CCR2_WT1;
	write_cyrix_reg(CCR2, ccr2);

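	/*
	 * Per the Cyrix documentation, CCR4 and PCR0 below are reachable
	 * only while the MAPEN bits in CCR3 are set: writing CCR3_MAPEN0
	 * exposes these extended configuration registers, and restoring
	 * the saved CCR3 at the end hides them again.
	 */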
	/* Initialize CCR4. */
	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
	ccr4 |= CCR4_FASTFPE;
#else
	ccr4 &= ~CCR4_FASTFPE;
#endif
	ccr4 &= ~CCR4_IOMASK;
	/********************************************************************
	 * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
	 * should be 0 for errata fix.
	 ********************************************************************/
#ifdef CPU_IORT
	ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
	write_cyrix_reg(CCR4, ccr4);

	/* Initialize PCR0. */
	/****************************************************************
	 * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
	 * BTB_EN might make your system unstable.
	 ****************************************************************/
	pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
	pcr0 |= PCR0_RSTK;
#else
	pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
	pcr0 |= PCR0_BTB;
#else
	pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
	pcr0 |= PCR0_LOOP;
#else
	pcr0 &= ~PCR0_LOOP;
#endif

	/****************************************************************
	 * WARNING: if you use a memory mapped I/O device, don't use
	 * DISABLE_5X86_LSSER option, which may reorder memory mapped
	 * I/O access.
	 * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
	 ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
	pcr0 &= ~PCR0_LSSER;
#else
	pcr0 |= PCR0_LSSER;
#endif
	write_cyrix_reg(PCR0, pcr0);

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	(void)read_cyrix_reg(0x80);		/* dummy */

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}

#ifdef CPU_I486_ON_386
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
	register_t saveintr;

	saveintr = intr_disable();

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0, NW = 0 */

	intr_restore(saveintr);
}
#endif

/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
	register_t saveintr;
	u_char	ccr3, ccr4;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 |= CCR4_DTE;
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	/*
	 * Earlier revisions of the 6x86 CPU could crash the system if
	 * the L1 cache is in write-back mode.
	 */
	if ((cyrix_did & 0xff00) > 0x1600)
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	else {
		/* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
		load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
#else
		load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0 and NW = 1 */
#endif
	}

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}
#endif /* I486_CPU */

#ifdef I586_CPU
/*
 * Rise mP6
 */
static void
init_rise(void)
{

	/*
	 * The CMPXCHG8B instruction is always available but hidden.
	 */
	cpu_feature |= CPUID_CX8;
}

/*
 * IDT WinChip C6/2/2A/2B/3
 *
 * http://www.centtech.com/winchip_bios_writers_guide_v4_0.pdf
 */
static void
init_winchip(void)
{
	u_int regs[4];
	uint64_t fcr;

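	/*
	 * MSR 0x0107 is the Feature Control Register (FCR) described in
	 * the BIOS writers guide linked above.
	 */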
	fcr = rdmsr(0x0107);

	/*
	 * Set ECX8, DSMC, DTLOCK/EDCTLB, EMMX, and ERETSTK and clear DPDC.
	 */
	fcr |= (1 << 1) | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 16);
	fcr &= ~(1ULL << 11);

	/*
	 * Additionally, set EBRPRED, E2MMX and EAMD3D for WinChip 2 and 3.
	 */
	if (CPUID_TO_MODEL(cpu_id) >= 8)
		fcr |= (1 << 12) | (1 << 19) | (1 << 20);

	wrmsr(0x0107, fcr);
	do_cpuid(1, regs);
	cpu_feature = regs[3];
}
#endif

#ifdef I686_CPU
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	register_t saveintr;
	u_char	ccr3, ccr4;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */

	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}

static int ppro_apic_used = -1;

static void
init_ppro(void)
{
	u_int64_t	apicbase;

	/*
	 * Local APIC should be disabled if it is not going to be used.
	 */
	if (ppro_apic_used != 1) {
		apicbase = rdmsr(MSR_APICBASE);
		apicbase &= ~APICBASE_ENABLED;
		wrmsr(MSR_APICBASE, apicbase);
		ppro_apic_used = 0;
	}
}

/*
 * If the local APIC is going to be used after being disabled above,
 * re-enable it and don't disable it in the future.
 */
void
ppro_reenable_apic(void)
{
	u_int64_t	apicbase;

	if (ppro_apic_used == 0) {
		apicbase = rdmsr(MSR_APICBASE);
		apicbase |= APICBASE_ENABLED;
		wrmsr(MSR_APICBASE, apicbase);
		ppro_apic_used = 1;
	}
}

/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	register_t	saveintr;
	u_int64_t	bbl_cr_ctl3;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef	CPU_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	intr_restore(saveintr);
#endif /* CPU_PPRO2CELERON */
}

/*
 * Initialize special VIA features
 */
static void
init_via(void)
{
	u_int regs[4], val;
	uint64_t fcr;

	/*
	 * Explicitly enable CX8 and PGE on C3.
	 *
	 * http://www.via.com.tw/download/mainboards/6/13/VIA_C3_EBGA%20datasheet110.pdf
	 */
	if (CPUID_TO_MODEL(cpu_id) <= 9)
		fcr = (1 << 1) | (1 << 7);
	else
		fcr = 0;

	/*
	 * Check extended CPUID for PadLock features.
	 *
	 * http://www.via.com.tw/en/downloads/whitepapers/initiatives/padlock/programming_guide.pdf
	 */
	do_cpuid(0xc0000000, regs);
	if (regs[0] >= 0xc0000001) {
		do_cpuid(0xc0000001, regs);
		val = regs[3];
	} else
		val = 0;

	/* Enable RNG if present. */
	if ((val & VIA_CPUID_HAS_RNG) != 0) {
		via_feature_rng = VIA_HAS_RNG;
		wrmsr(0x110B, rdmsr(0x110B) | VIA_CPUID_DO_RNG);
	}

	/* Enable PadLock if present. */
	if ((val & VIA_CPUID_HAS_ACE) != 0)
		via_feature_xcrypt |= VIA_HAS_AES;
	if ((val & VIA_CPUID_HAS_ACE2) != 0)
		via_feature_xcrypt |= VIA_HAS_AESCTR;
	if ((val & VIA_CPUID_HAS_PHE) != 0)
		via_feature_xcrypt |= VIA_HAS_SHA;
	if ((val & VIA_CPUID_HAS_PMM) != 0)
		via_feature_xcrypt |= VIA_HAS_MM;
	if (via_feature_xcrypt != 0)
		fcr |= 1 << 28;

	wrmsr(0x1107, rdmsr(0x1107) | fcr);
}

#endif /* I686_CPU */

#if defined(I586_CPU) || defined(I686_CPU)
static void
init_transmeta(void)
{
	u_int regs[4];

	/* Expose all hidden features. */
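	/*
	 * MSR 0x80860004 appears to be a CPUID feature-mask register on
	 * Transmeta parts: setting every mask bit unhides all maskable
	 * feature flags, which is why CPUID is re-read below.
	 */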
	wrmsr(0x80860004, rdmsr(0x80860004) | ~0UL);
	do_cpuid(1, regs);
	cpu_feature = regs[3];
}
#endif

extern int elf32_nxstack;

void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I586_CPU
	case CPU_586:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
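			/*
			 * For family-5 AMD parts, bits 7-4 of cpu_id hold
			 * the model and bits 3-0 the stepping: models 0-3
			 * are K5, 6-7 K6, 8 K6-2, 9 K6-III and 0xa Geode.
			 */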
#ifdef CPU_WT_ALLOC
			if (((cpu_id & 0x0f0) > 0) &&
			    ((cpu_id & 0x0f0) < 0x60) &&
			    ((cpu_id & 0x00f) > 3))
				enable_K5_wt_alloc();
			else if (((cpu_id & 0x0f0) > 0x80) ||
			    (((cpu_id & 0x0f0) == 0x80) &&
				(cpu_id & 0x00f) > 0x07))
				enable_K6_2_wt_alloc();
			else if ((cpu_id & 0x0f0) > 0x50)
				enable_K6_wt_alloc();
#endif
			if ((cpu_id & 0xf0) == 0xa0)
				/*
				 * Make sure the TSC runs through
				 * suspension, otherwise we can't use
				 * it as a timecounter.
				 */
				wrmsr(0x1900, rdmsr(0x1900) | 0x20ULL);
			break;
		case CPU_VENDOR_CENTAUR:
			init_winchip();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		case CPU_VENDOR_RISE:
			init_rise();
			break;
		}
		break;
#endif
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_INTEL:
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
			break;
#ifdef CPU_ATHLON_SSE_HACK
		case CPU_VENDOR_AMD:
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE. These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];
				wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
			break;
#endif
		case CPU_VENDOR_CENTAUR:
			init_via();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		}
		break;
#endif
	default:
		break;
	}
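	/*
	 * CR4_FXSR (OSFXSR) lets the kernel manage SSE state with
	 * FXSAVE/FXRSTOR, and CR4_XMM (OSXMMEXCPT) enables delivery of
	 * SIMD floating-point exceptions; both must be set before SSE
	 * instructions can be used.
	 */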
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#if defined(PAE) || defined(PAE_TABLES)
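	/*
	 * The no-execute bit exists only in the PAE page-table format,
	 * so EFER.NXE and the PG_NX mask are only enabled for PAE
	 * kernels.
	 */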
	if ((amd_feature & AMDID_NX) != 0) {
		uint64_t msr;

		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
		elf32_nxstack = 1;
	}
#endif
}

void
initializecpucache(void)
{

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 * 	(Value * 8 = cache line size in bytes)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing APIC register window under virtualization
	 * environments.  These environments tend to disable the
	 * CPUID_SS feature even though the native CPU supports it.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (vm_guest != VM_GUEST_NO && hw_clflush_disable == -1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}
	/*
	 * The kernel's use of CLFLUSH{,OPT} can be disabled manually
	 * by setting the hw.clflush_disable tunable.
	 */
	if (hw_clflush_disable == 1) {
		cpu_feature &= ~CPUID_CLFSH;
		cpu_stdext_feature &= ~CPUID_STDEXT_CLFLUSHOPT;
	}
}

#if defined(I586_CPU) && defined(CPU_WT_ALLOC)
/*
 * Enable the write allocate feature of AMD processors.
 * The following functions require the Maxmem variable to be set.
 */
static void
enable_K5_wt_alloc(void)
{
	u_int64_t	msr;
	register_t	saveintr;

	/*
	 * Write allocate is supported only on models 1, 2, and 3, with
	 * a stepping of 4 or greater.
	 */
	if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) {
		saveintr = intr_disable();
		msr = rdmsr(0x83);		/* HWCR */
		wrmsr(0x83, msr & ~0x10);	/* Disable write allocate. */

		/*
		 * We have to tell the chip where the top of memory is,
		 * since video cards could have frame buffers there,
		 * memory-mapped I/O could be there, etc.
		 */
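		/*
		 * Maxmem is in units of 4 KB pages, so Maxmem / 16 is the
		 * top of memory expressed in the 64 KB granules this MSR's
		 * top-of-memory field apparently uses.
		 */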
		if (Maxmem > 0)
			msr = Maxmem / 16;
		else
			msr = 0;
		msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE;

		/*
		 * There is no way to know whether the 15-16M hole exists
		 * or not.  Therefore, we disable write allocate for this
		 * range.
		 */
		wrmsr(0x86, 0x0ff00f0);
		msr |= AMD_WT_ALLOC_PRE;
		wrmsr(0x85, msr);

		msr = rdmsr(0x83);
		wrmsr(0x83, msr | 0x10);	/* Enable write allocate. */
		intr_restore(saveintr);
	}
}

static void
enable_K6_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	register_t	saveintr;

	saveintr = intr_disable();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that the memory size is aligned to 4M. */
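	/*
	 * Maxmem is in 4 KB pages: Maxmem >> 8 is the size in 1 MB units,
	 * and adding 3 before shifting right by 2 rounds that up to the
	 * 4 MB granules of WHCR's write-allocate limit field.
	 */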
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 508M bytes. */
	if (size > 0x7f)
		size = 0x7f;
	whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1);

#if defined(NO_MEMORY_HOLE)
	if (whcr & (0x7fLL << 1))
		whcr |= 0x0001LL;
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~0x0001LL;
#endif
	wrmsr(0x0c0000082, whcr);

	intr_restore(saveintr);
}

static void
enable_K6_2_wt_alloc(void)
{
	quad_t	size;
	u_int64_t	whcr;
	register_t	saveintr;

	saveintr = intr_disable();
	wbinvd();

#ifdef CPU_DISABLE_CACHE
	/*
	 * Certain K6-2 boxes become unstable when write allocation is
	 * enabled.
	 */
	/*
	 * The AMD-K6 processor provides the 64-bit Test Register 12 (TR12),
	 * but only the Cache Inhibit (CI) bit (bit 3 of TR12) is supported.
	 * All other bits in TR12 have no effect on the processor's operation.
	 * The I/O Trap Restart function (bit 9 of TR12) is always enabled
	 * on the AMD-K6.
	 */
	wrmsr(0x0000000e, (u_int64_t)0x0008);
#endif
	/* Don't assume that the memory size is aligned to 4M. */
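	/*
	 * Same rounding to 4 MB units as in enable_K6_wt_alloc(), but on
	 * these parts the write-allocate limit is the 10-bit field at
	 * bits 31-22 of WHCR (note the mask below), hence the 4092M cap.
	 */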
	if (Maxmem > 0)
		size = ((Maxmem >> 8) + 3) >> 2;
	else
		size = 0;

	/* Limit is 4092M bytes. */
	if (size > 0x3ff)
		size = 0x3ff;
	whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22);

#if defined(NO_MEMORY_HOLE)
	if (whcr & (0x3ffLL << 22))
		whcr |= 1LL << 16;
#else
	/*
	 * There is no way to know whether the 15-16M hole exists or not.
	 * Therefore, we disable write allocate for this range.
	 */
	whcr &= ~(1LL << 16);
#endif
	wrmsr(0x0c0000082, whcr);

	intr_restore(saveintr);
}
#endif /* I586_CPU && CPU_WT_ALLOC */

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

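/*
 * Dump the Cyrix configuration registers: "show cyrixreg" at the DDB
 * prompt.
 */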
DB_SHOW_COMMAND(cyrixreg, cyrixreg)
{
	register_t saveintr;
	u_int	cr0;
	u_char	ccr1, ccr2, ccr3;
	u_char	ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0;

	cr0 = rcr0();
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		saveintr = intr_disable();

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) {
			ccr0 = read_cyrix_reg(CCR0);
		}
		ccr1 = read_cyrix_reg(CCR1);
		ccr2 = read_cyrix_reg(CCR2);
		ccr3 = read_cyrix_reg(CCR3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			write_cyrix_reg(CCR3, CCR3_MAPEN0);
			ccr4 = read_cyrix_reg(CCR4);
			if ((cpu == CPU_M1) || (cpu == CPU_M2))
				ccr5 = read_cyrix_reg(CCR5);
			else
				pcr0 = read_cyrix_reg(PCR0);
			write_cyrix_reg(CCR3, ccr3);		/* Restore CCR3. */
		}
		intr_restore(saveintr);

		if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX))
			printf("CCR0=%x, ", (u_int)ccr0);

		printf("CCR1=%x, CCR2=%x, CCR3=%x",
			(u_int)ccr1, (u_int)ccr2, (u_int)ccr3);
		if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) {
			printf(", CCR4=%x, ", (u_int)ccr4);
			if (cpu == CPU_M1SC)
				printf("PCR0=%x\n", pcr0);
			else
				printf("CCR5=%x\n", ccr5);
		}
	}
	printf("CR0=%x\n", cr0);
}
#endif /* DDB */