xref: /openbsd/sys/arch/amd64/amd64/cpu.c (revision 898184e3)
1 /*	$OpenBSD: cpu.c,v 1.54 2012/11/02 15:10:28 jsg Exp $	*/
2 /* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
3 
4 /*-
5  * Copyright (c) 2000 The NetBSD Foundation, Inc.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to The NetBSD Foundation
9  * by RedBack Networks Inc.
10  *
11  * Author: Bill Sommerfeld
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
23  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
24  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
25  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
26  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 /*
36  * Copyright (c) 1999 Stefan Grefen
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *      This product includes software developed by the NetBSD
49  *      Foundation, Inc. and its contributors.
50  * 4. Neither the name of The NetBSD Foundation nor the names of its
51  *    contributors may be used to endorse or promote products derived
52  *    from this software without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY
55  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR AND CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  */
66 
67 #include "lapic.h"
68 #include "ioapic.h"
69 
70 #include <sys/param.h>
71 #include <sys/proc.h>
72 #include <sys/systm.h>
73 #include <sys/device.h>
74 #include <sys/malloc.h>
75 #include <sys/memrange.h>
76 
77 #include <uvm/uvm_extern.h>
78 
79 #include <machine/cpu.h>
80 #include <machine/cpufunc.h>
81 #include <machine/cpuvar.h>
82 #include <machine/pmap.h>
83 #include <machine/vmparam.h>
84 #include <machine/mpbiosvar.h>
85 #include <machine/pcb.h>
86 #include <machine/specialreg.h>
87 #include <machine/segments.h>
88 #include <machine/gdt.h>
89 #include <machine/pio.h>
90 
91 #if NLAPIC > 0
92 #include <machine/apicvar.h>
93 #include <machine/i82489reg.h>
94 #include <machine/i82489var.h>
95 #endif
96 
97 #if NIOAPIC > 0
98 #include <machine/i82093var.h>
99 #endif
100 
101 #include <dev/ic/mc146818reg.h>
102 #include <amd64/isa/nvram.h>
103 #include <dev/isa/isareg.h>
104 #include <dev/rndvar.h>
105 
int     cpu_match(struct device *, void *, void *);
void    cpu_attach(struct device *, struct device *, void *);
void	patinit(struct cpu_info *ci);

/* Per-device softc: glues the autoconf device node to its cpu_info. */
struct cpu_softc {
	struct device sc_dev;		/* device tree glue */
	struct cpu_info *sc_info;	/* pointer to CPU info */
};
114 
#ifndef SMALL_KERNEL
void	replacesmap(void);

/*
 * Labels exported by the locore copy routines.  Each *_stac/*_clac
 * label marks a 3-byte nop placeholder that replacesmap() overwrites
 * with a real stac/clac instruction when the CPU supports SMAP;
 * _stac and _clac are the instruction templates that get copied in.
 */
extern long _copyout_stac;
extern long _copyout_clac;
extern long _copyin_stac;
extern long _copyin_clac;
extern long _copy_fault_clac;
extern long _copyoutstr_stac;
extern long _copyinstr_stac;
extern long _copystr_fault_clac;
extern long _stac;
extern long _clac;

/*
 * Patch table: daddr is the placeholder nop to overwrite, saddr is
 * the template instruction (stac or clac) copied over it.
 */
static const struct {
	void *daddr;
	void *saddr;
} ireplace[] = {
	{ &_copyout_stac, &_stac },
	{ &_copyout_clac, &_clac },
	{ &_copyin_stac, &_stac },
	{ &_copyin_clac, &_clac },
	{ &_copy_fault_clac, &_clac },
	{ &_copyoutstr_stac, &_stac },
	{ &_copyinstr_stac, &_stac },
	{ &_copystr_fault_clac, &_clac },
};
142 
143 void
144 replacesmap(void)
145 {
146 	static int replacedone = 0;
147 	int i, s;
148 	vaddr_t nva;
149 
150 	if (replacedone)
151 		return;
152 	replacedone = 1;
153 
154 	s = splhigh();
155 	/*
156 	 * Create writeable aliases of memory we need
157 	 * to write to as kernel is mapped read-only
158 	 */
159 	nva = (vaddr_t)km_alloc(2 * PAGE_SIZE, &kv_any, &kp_none, &kd_waitok);
160 
161 	for (i = 0; i < nitems(ireplace); i++) {
162 		paddr_t kva = trunc_page((paddr_t)ireplace[i].daddr);
163 		paddr_t po = (paddr_t)ireplace[i].daddr & PAGE_MASK;
164 		paddr_t pa1, pa2;
165 
166 		pmap_extract(pmap_kernel(), kva, &pa1);
167 		pmap_extract(pmap_kernel(), kva + PAGE_SIZE, &pa2);
168 		pmap_kenter_pa(nva, pa1, VM_PROT_READ | VM_PROT_WRITE);
169 		pmap_kenter_pa(nva + PAGE_SIZE, pa2, VM_PROT_READ |
170 		    VM_PROT_WRITE);
171 		pmap_update(pmap_kernel());
172 
173 		/* replace 3 byte nops with stac/clac instructions */
174 		bcopy(ireplace[i].saddr, (void *)(nva + po), 3);
175 	}
176 
177 	km_free((void *)nva, 2 * PAGE_SIZE, &kv_any, &kp_none);
178 
179 	splx(s);
180 }
181 #endif /* !SMALL_KERNEL */
182 
#ifdef MULTIPROCESSOR
int mp_cpu_start(struct cpu_info *);
void mp_cpu_start_cleanup(struct cpu_info *);
/* Start/cleanup hooks used to bring application processors out of reset. */
struct cpu_functions mp_cpu_funcs = { mp_cpu_start, NULL,
				      mp_cpu_start_cleanup };
#endif /* MULTIPROCESSOR */

/* Autoconf attachment glue for cpu* devices. */
struct cfattach cpu_ca = {
	sizeof(struct cpu_softc), cpu_match, cpu_attach
};

struct cfdriver cpu_cd = {
	NULL, "cpu", DV_DULL
};
197 
/*
 * Statically-allocated CPU info for the primary CPU (or the only
 * CPU, on uniprocessors).  The CPU info list is initialized to
 * point at it.
 */
struct cpu_info cpu_info_primary = { 0, &cpu_info_primary };

struct cpu_info *cpu_info_list = &cpu_info_primary;

#ifdef MULTIPROCESSOR
/*
 * Array of CPU info structures.  Must be statically-allocated because
 * curproc, etc. are used early.
 */
struct cpu_info *cpu_info[MAXCPUS] = { &cpu_info_primary };

void    	cpu_hatch(void *);
void    	cpu_boot_secondary(struct cpu_info *ci);
void    	cpu_start_secondary(struct cpu_info *ci);
void		cpu_copy_trampoline(void);

/*
 * Runs once per boot once multiprocessor goo has been detected and
 * the local APIC on the boot processor has been mapped.
 *
 * Called from lapic_boot_init() (from mpbios_scan()).
 */
void
cpu_init_first(void)
{
	/* Install the AP spin-up code at its low-memory location. */
	cpu_copy_trampoline();
}
#endif
231 
232 int
233 cpu_match(struct device *parent, void *match, void *aux)
234 {
235 	struct cfdata *cf = match;
236 	struct cpu_attach_args *caa = aux;
237 
238 	if (strcmp(caa->caa_name, cf->cf_driver->cd_name) != 0)
239 		return 0;
240 
241 	if (cf->cf_unit >= MAXCPUS)
242 		return 0;
243 
244 	return 1;
245 }
246 
247 static void
248 cpu_vm_init(struct cpu_info *ci)
249 {
250 	int ncolors = 2, i;
251 
252 	for (i = CAI_ICACHE; i <= CAI_L2CACHE; i++) {
253 		struct x86_cache_info *cai;
254 		int tcolors;
255 
256 		cai = &ci->ci_cinfo[i];
257 
258 		tcolors = atop(cai->cai_totalsize);
259 		switch(cai->cai_associativity) {
260 		case 0xff:
261 			tcolors = 1; /* fully associative */
262 			break;
263 		case 0:
264 		case 1:
265 			break;
266 		default:
267 			tcolors /= cai->cai_associativity;
268 		}
269 		ncolors = max(ncolors, tcolors);
270 	}
271 
272 #ifdef notyet
273 	/*
274 	 * Knowing the size of the largest cache on this CPU, re-color
275 	 * our pages.
276 	 */
277 	if (ncolors <= uvmexp.ncolors)
278 		return;
279 	printf("%s: %d page colors\n", ci->ci_dev->dv_xname, ncolors);
280 	uvm_page_recolor(ncolors);
281 #endif
282 }
283 
284 
/*
 * Attach a cpu device.  For an AP this allocates its cpu_info and
 * (under MULTIPROCESSOR) its idle PCB/stack and kicks the processor
 * into cpu_hatch(); for the boot/single processor it identifies and
 * initializes the CPU directly.
 */
void
cpu_attach(struct device *parent, struct device *self, void *aux)
{
	struct cpu_softc *sc = (void *) self;
	struct cpu_attach_args *caa = aux;
	struct cpu_info *ci;
#if defined(MULTIPROCESSOR)
	int cpunum = sc->sc_dev.dv_unit;
	vaddr_t kstack;
	struct pcb *pcb;
#endif

	/*
	 * If we're an Application Processor, allocate a cpu_info
	 * structure, otherwise use the primary's.
	 */
	if (caa->cpu_role == CPU_ROLE_AP) {
		ci = malloc(sizeof(*ci), M_DEVBUF, M_WAITOK|M_ZERO);
#if defined(MULTIPROCESSOR)
		if (cpu_info[cpunum] != NULL)
			panic("cpu at apic id %d already attached?", cpunum);
		cpu_info[cpunum] = ci;
#endif
#ifdef TRAPLOG
		ci->ci_tlog_base = malloc(sizeof(struct tlog),
		    M_DEVBUF, M_WAITOK);
#endif
	} else {
		ci = &cpu_info_primary;
#if defined(MULTIPROCESSOR)
		/* We attach the BP on the CPU itself; sanity-check its id. */
		if (caa->cpu_number != lapic_cpu_number()) {
			panic("%s: running cpu is at apic %d"
			    " instead of at expected %d",
			    sc->sc_dev.dv_xname, lapic_cpu_number(), caa->cpu_number);
		}
#endif
	}

	ci->ci_self = ci;
	sc->sc_info = ci;

	ci->ci_dev = self;
	ci->ci_apicid = caa->cpu_number;
#ifdef MULTIPROCESSOR
	ci->ci_cpuid = cpunum;
#else
	ci->ci_cpuid = 0;	/* False for APs, but they're not used anyway */
#endif
	ci->ci_func = caa->cpu_func;

	simple_lock_init(&ci->ci_slock);

#if defined(MULTIPROCESSOR)
	/*
	 * Allocate UPAGES contiguous pages for the idle PCB and stack.
	 */
	kstack = uvm_km_alloc (kernel_map, USPACE);
	if (kstack == 0) {
		/* Out of memory for the BP is fatal; for an AP just skip it. */
		if (caa->cpu_role != CPU_ROLE_AP) {
			panic("cpu_attach: unable to allocate idle stack for"
			    " primary");
		}
		printf("%s: unable to allocate idle stack\n",
		    sc->sc_dev.dv_xname);
		return;
	}
	/* The PCB lives at the bottom; the stack grows down from the top. */
	pcb = ci->ci_idle_pcb = (struct pcb *) kstack;
	memset(pcb, 0, USPACE);

	pcb->pcb_kstack = kstack + USPACE - 16;
	pcb->pcb_rbp = pcb->pcb_rsp = kstack + USPACE - 16;
	pcb->pcb_pmap = pmap_kernel();
	pcb->pcb_cr0 = rcr0();
	pcb->pcb_cr3 = pcb->pcb_pmap->pm_pdirpa;
#endif

	/* further PCB init done later. */

	printf(": ");

	/* Role-specific bring-up. */
	switch (caa->cpu_role) {
	case CPU_ROLE_SP:
		/* Only CPU in the system: identify and initialize it now. */
		printf("(uniprocessor)\n");
		ci->ci_flags |= CPUF_PRESENT | CPUF_SP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);
		break;

	case CPU_ROLE_BP:
		printf("apid %d (boot processor)\n", caa->cpu_number);
		ci->ci_flags |= CPUF_PRESENT | CPUF_BSP | CPUF_PRIMARY;
		cpu_intr_init(ci);
		identifycpu(ci);
		cpu_init(ci);

#if NLAPIC > 0
		/*
		 * Enable local apic
		 */
		lapic_enable();
		lapic_calibrate_timer(ci);
#endif
#if NIOAPIC > 0
		ioapic_bsp_id = caa->cpu_number;
#endif
		break;

	case CPU_ROLE_AP:
		/*
		 * report on an AP
		 */
		printf("apid %d (application processor)\n", caa->cpu_number);

#if defined(MULTIPROCESSOR)
		cpu_intr_init(ci);
		gdt_alloc_cpu(ci);
		sched_init_cpu(ci);
		cpu_start_secondary(ci);
		ncpus++;
		/* Only link it into cpu_info_list if it actually came up. */
		if (ci->ci_flags & CPUF_PRESENT) {
			ci->ci_next = cpu_info_list->ci_next;
			cpu_info_list->ci_next = ci;
		}
#else
		printf("%s: not started\n", sc->sc_dev.dv_xname);
#endif
		break;

	default:
		panic("unknown processor type??");
	}
	cpu_vm_init(ci);

#if defined(MULTIPROCESSOR)
	if (mp_verbose) {
		printf("%s: kstack at 0x%lx for %d bytes\n",
		    sc->sc_dev.dv_xname, kstack, USPACE);
		printf("%s: idle pcb at %p, idle sp at 0x%lx\n",
		    sc->sc_dev.dv_xname, pcb, pcb->pcb_rsp);
	}
#endif
}
428 
/*
 * Initialize the processor appropriately.
 */

void
cpu_init(struct cpu_info *ci)
{
	/* configure the CPU if needed */
	if (ci->cpu_setup != NULL)
		(*ci->cpu_setup)(ci);
	/*
	 * We do this here after identifycpu() because errata may affect
	 * what we do.
	 */
	patinit(ci);

	/* Honor page write-protection even in ring 0. */
	lcr0(rcr0() | CR0_WP);
	/* Enable SMEP when the CPU advertises it. */
	lcr4(rcr4() | CR4_DEFAULT |
	    (ci->ci_feature_sefflags & SEFF0EBX_SMEP ? CR4_SMEP : 0));
#ifndef SMALL_KERNEL
	if (ci->ci_feature_sefflags & SEFF0EBX_SMAP)
		lcr4(rcr4() | CR4_SMAP);
#endif

#ifdef MULTIPROCESSOR
	/* Mark this CPU as running and flush stale (global) TLB entries. */
	ci->ci_flags |= CPUF_RUNNING;
	tlbflushg();
#endif
}
458 
459 
460 #ifdef MULTIPROCESSOR
461 void
462 cpu_boot_secondary_processors(void)
463 {
464 	struct cpu_info *ci;
465 	u_long i;
466 
467 	for (i=0; i < MAXCPUS; i++) {
468 		ci = cpu_info[i];
469 		if (ci == NULL)
470 			continue;
471 		if (ci->ci_idle_pcb == NULL)
472 			continue;
473 		if ((ci->ci_flags & CPUF_PRESENT) == 0)
474 			continue;
475 		if (ci->ci_flags & (CPUF_BSP|CPUF_SP|CPUF_PRIMARY))
476 			continue;
477 		ci->ci_randseed = random();
478 		cpu_boot_secondary(ci);
479 	}
480 }
481 
482 void
483 cpu_init_idle_pcbs(void)
484 {
485 	struct cpu_info *ci;
486 	u_long i;
487 
488 	for (i=0; i < MAXCPUS; i++) {
489 		ci = cpu_info[i];
490 		if (ci == NULL)
491 			continue;
492 		if (ci->ci_idle_pcb == NULL)
493 			continue;
494 		if ((ci->ci_flags & CPUF_PRESENT) == 0)
495 			continue;
496 		x86_64_init_pcb_tss_ldt(ci);
497 	}
498 }
499 
/*
 * Start one application processor: send it the startup sequence via
 * its cpu_functions (CPU_STARTUP -> mp_cpu_start) and handshake with
 * cpu_hatch() through the ci_flags bits CPUF_PRESENT and
 * CPUF_IDENTIFY/CPUF_IDENTIFIED.
 */
void
cpu_start_secondary(struct cpu_info *ci)
{
	int i;

	ci->ci_flags |= CPUF_AP;

	CPU_STARTUP(ci);

	/*
	 * wait for it to become ready
	 */
	/* up to 100000 * 10us = ~1s; cpu_hatch() sets CPUF_PRESENT */
	for (i = 100000; (!(ci->ci_flags & CPUF_PRESENT)) && i>0;i--) {
		delay(10);
	}
	if (! (ci->ci_flags & CPUF_PRESENT)) {
		printf("%s: failed to become ready\n", ci->ci_dev->dv_xname);
#if defined(MPDEBUG) && defined(DDB)
		printf("dropping into debugger; continue from here to resume boot\n");
		Debugger();
#endif
	}

	if ((ci->ci_flags & CPUF_IDENTIFIED) == 0) {
		/* Tell the AP it may run identifycpu() now. */
		atomic_setbits_int(&ci->ci_flags, CPUF_IDENTIFY);

		/* wait for it to identify */
		for (i = 100000; (ci->ci_flags & CPUF_IDENTIFY) && i > 0; i--)
			delay(10);

		if (ci->ci_flags & CPUF_IDENTIFY)
			printf("%s: failed to identify\n",
			    ci->ci_dev->dv_xname);
	}

	CPU_START_CLEANUP(ci);
}
537 
538 void
539 cpu_boot_secondary(struct cpu_info *ci)
540 {
541 	int i;
542 
543 	atomic_setbits_int(&ci->ci_flags, CPUF_GO);
544 
545 	for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i>0;i--) {
546 		delay(10);
547 	}
548 	if (! (ci->ci_flags & CPUF_RUNNING)) {
549 		printf("cpu failed to start\n");
550 #if defined(MPDEBUG) && defined(DDB)
551 		printf("dropping into debugger; continue from here to resume boot\n");
552 		Debugger();
553 #endif
554 	}
555 }
556 
/*
 * The CPU ends up here when its ready to run
 * This is called from code in mptramp.s; at this point, we are running
 * in the idle pcb/idle stack of the new cpu.  When this function returns,
 * this processor will enter the idle loop and start looking for work.
 *
 * XXX should share some of this with init386 in machdep.c
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	int s;

	/* Set GSBASE to our cpu_info before anything needs curcpu(). */
	cpu_init_msrs(ci);

#ifdef DEBUG
	if (ci->ci_flags & CPUF_PRESENT)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	/* Tell cpu_start_secondary() we made it out of the trampoline. */
	ci->ci_flags |= CPUF_PRESENT;

	lapic_enable();
	lapic_startclock();

	if ((ci->ci_flags & CPUF_IDENTIFIED) == 0) {
		/*
		 * We need to wait until we can identify, otherwise dmesg
		 * output will be messy.
		 */
		while ((ci->ci_flags & CPUF_IDENTIFY) == 0)
			delay(10);

		identifycpu(ci);

		/* Signal we're done */
		atomic_clearbits_int(&ci->ci_flags, CPUF_IDENTIFY);
		/* Prevent identifycpu() from running again */
		atomic_setbits_int(&ci->ci_flags, CPUF_IDENTIFIED);
	}

	/* Spin until cpu_boot_secondary() releases us. */
	while ((ci->ci_flags & CPUF_GO) == 0)
		delay(10);
#ifdef DEBUG
	if (ci->ci_flags & CPUF_RUNNING)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	/* Load per-CPU control state and descriptor tables. */
	lcr0(ci->ci_idle_pcb->pcb_cr0);
	cpu_init_idt();
	lapic_set_lvt();
	gdt_init_cpu(ci);
	fpuinit(ci);

	lldt(0);

	/* Re-initialise memory range handling on AP */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->initAP(&mem_range_softc);

	cpu_init(ci);

	s = splhigh();
	lcr8(0);		/* TPR = 0: accept all interrupt priorities */
	enable_intr();

	microuptime(&ci->ci_schedstate.spc_runtime);
	splx(s);

	/* Does not return: hand this CPU to the scheduler. */
	SCHED_LOCK(s);
	cpu_switchto(NULL, sched_chooseproc());
}
630 
631 #if defined(DDB)
632 
633 #include <ddb/db_output.h>
634 #include <machine/db_machdep.h>
635 
/*
 * Dump cpu information from ddb.
 */
void
cpu_debug_dump(void)
{
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	/* One header line, then one row per CPU on the cpu_info list. */
	db_printf("addr		dev	id	flags	ipis	curproc		fpcurproc\n");
	CPU_INFO_FOREACH(cii, ci) {
		db_printf("%p	%s	%u	%x	%x	%10p	%10p\n",
		    ci,
		    ci->ci_dev == NULL ? "BOOT" : ci->ci_dev->dv_xname,
		    ci->ci_cpuid,
		    ci->ci_flags, ci->ci_ipis,
		    ci->ci_curproc,
		    ci->ci_fpcurproc);
	}
}
656 #endif
657 
/*
 * Copy the AP spin-up trampoline (from mptramp.s) to its fixed
 * low-memory location MP_TRAMPOLINE and patch in the page directory
 * physical address the APs must load.
 */
void
cpu_copy_trampoline(void)
{
	/*
	 * Copy boot code.
	 */
	extern u_char cpu_spinup_trampoline[];
	extern u_char cpu_spinup_trampoline_end[];

	extern u_int32_t mp_pdirpa;
	extern paddr_t tramp_pdirpa;

	memcpy((caddr_t)MP_TRAMPOLINE,
	    cpu_spinup_trampoline,
	    cpu_spinup_trampoline_end-cpu_spinup_trampoline);

	/*
	 * We need to patch this after we copy the trampoline,
	 * the symbol points into the copied trampoline.
	 */
	/*
	 * NOTE(review): mp_pdirpa is only 32 bits wide; this assumes
	 * tramp_pdirpa fits below 4GB (the trampoline starts in
	 * non-64-bit mode) — confirm against mptramp.s.
	 */
	mp_pdirpa = tramp_pdirpa;
}
680 
681 
/*
 * Kick an AP out of its halted state, following the warm-reset /
 * INIT-STARTUP sequence (the quoted comments below are from the
 * Intel MP startup procedure).  Returns 0 on success or the error
 * from sending an IPI.
 */
int
mp_cpu_start(struct cpu_info *ci)
{
#if NLAPIC > 0
	int error;
#endif
	unsigned short dwordptr[2];

	/*
	 * "The BSP must initialize CMOS shutdown code to 0Ah ..."
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_JUMP);

	/*
	 * "and the warm reset vector (DWORD based at 40:67) to point
	 * to the AP startup code ..."
	 */

	/* segment:offset form — offset 0, segment = trampoline >> 4 */
	dwordptr[0] = 0;
	dwordptr[1] = MP_TRAMPOLINE >> 4;

	/* Temporarily map page 0 so we can write the warm reset vector. */
	pmap_kenter_pa(0, 0, VM_PROT_READ|VM_PROT_WRITE);
	memcpy((u_int8_t *) 0x467, dwordptr, 4);
	pmap_kremove(0, PAGE_SIZE);

#if NLAPIC > 0
	/*
	 * ... prior to executing the following sequence:"
	 */

	if (ci->ci_flags & CPUF_AP) {
		/* INIT IPI first, then (with an APIC) two STARTUP IPIs. */
		if ((error = x86_ipi_init(ci->ci_apicid)) != 0)
			return error;

		delay(10000);

		if (cpu_feature & CPUID_APIC) {
			if ((error = x86_ipi(MP_TRAMPOLINE/PAGE_SIZE,
					     ci->ci_apicid,
					     LAPIC_DLMODE_STARTUP)) != 0)
				return error;
			delay(200);

			if ((error = x86_ipi(MP_TRAMPOLINE/PAGE_SIZE,
					     ci->ci_apicid,
					     LAPIC_DLMODE_STARTUP)) != 0)
				return error;
			delay(200);
		}
	}
#endif
	return 0;
}
737 
/*
 * Undo mp_cpu_start()'s CMOS tinkering once the AP is up.
 */
void
mp_cpu_start_cleanup(struct cpu_info *ci)
{
	/*
	 * Ensure the NVRAM reset byte contains something vaguely sane.
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_RST);
}
748 #endif	/* MULTIPROCESSOR */
749 
750 typedef void (vector)(void);
751 extern vector Xsyscall, Xsyscall32;
752 
/*
 * Program the per-CPU MSRs: syscall/sysret entry machinery,
 * FS/GS base registers, and the NX enable bit.
 */
void
cpu_init_msrs(struct cpu_info *ci)
{
	/*
	 * STAR holds the kernel/user segment selectors used by
	 * syscall/sysret; LSTAR and CSTAR are the 64-bit and 32-bit
	 * compat entry points; SFMASK lists rflags bits cleared on entry.
	 */
	wrmsr(MSR_STAR,
	    ((uint64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((uint64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48));
	wrmsr(MSR_LSTAR, (uint64_t)Xsyscall);
	wrmsr(MSR_CSTAR, (uint64_t)Xsyscall32);
	wrmsr(MSR_SFMASK, PSL_NT|PSL_T|PSL_I|PSL_C);

	/* In the kernel, %gs points at this CPU's cpu_info. */
	wrmsr(MSR_FSBASE, 0);
	wrmsr(MSR_GSBASE, (u_int64_t)ci);
	wrmsr(MSR_KERNELGSBASE, 0);

	/* Enable no-execute page protection if the CPU supports it. */
	if (cpu_feature & CPUID_NXE)
		wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NXE);
}
770 
/*
 * Reprogram the PAT MSR so that entries 1 and 5 select
 * write-combining, and publish the page-table bit (PG_WC) that
 * pmap uses to request WC mappings.  No-op on CPUs without PAT.
 */
void
patinit(struct cpu_info *ci)
{
	extern int	pmap_pg_wc;
	u_int64_t	reg;

	if ((ci->ci_feature_flags & CPUID_PAT) == 0)
		return;
#define	PATENTRY(n, type)	(type << ((n) * 8))
#define	PAT_UC		0x0UL
#define	PAT_WC		0x1UL
#define	PAT_WT		0x4UL
#define	PAT_WP		0x5UL
#define	PAT_WB		0x6UL
#define	PAT_UCMINUS	0x7UL
	/*
	 * Set up PAT bits.
	 * The default pat table is the following:
	 * WB, WT, UC- UC, WB, WT, UC-, UC
	 * We change it to:
	 * WB, WC, UC-, UC, WB, WC, UC-, UC.
	 * i.e change the WT bit to be WC.
	 */
	reg = PATENTRY(0, PAT_WB) | PATENTRY(1, PAT_WC) |
	    PATENTRY(2, PAT_UCMINUS) | PATENTRY(3, PAT_UC) |
	    PATENTRY(4, PAT_WB) | PATENTRY(5, PAT_WC) |
	    PATENTRY(6, PAT_UCMINUS) | PATENTRY(7, PAT_UC);

	wrmsr(MSR_CR_PAT, reg);
	pmap_pg_wc = PG_WC;
}
802 
/* Periodic timeout used to harvest entropy from the CPU's RDRAND. */
struct timeout rdrand_tmo;
void rdrand(void *);

/*
 * Timeout handler: pull two 64-bit values from RDRAND, feed the
 * valid ones into the entropy pool, and reschedule ourselves to
 * run again in 10ms.
 */
void
rdrand(void *v)
{
	struct timeout *tmo = v;
	union {
		uint64_t u64;
		uint32_t u32[2];
	} r;
	uint64_t valid;
	int i;

	for (i = 0; i < 2; i++) {
		/*
		 * RDRAND reports success via the carry flag.  The xor
		 * zeroes "valid" (and clears CF); after RDRAND, rcl
		 * rotates CF into valid's low bit, so valid is nonzero
		 * exactly when r.u64 holds a fresh random number.
		 */
		__asm __volatile(
		    "xor	%1, %1\n\t"
		    "rdrand	%0\n\t"
		    "rcl	$1, %1\n"
		    : "=r" (r.u64), "=r" (valid) );

		if (valid) {
			add_true_randomness(r.u32[0]);
			add_true_randomness(r.u32[1]);
		}
	}

	timeout_add_msec(tmo, 10);
}
832