xref: /illumos-gate/usr/src/uts/i86pc/os/mp_pc.c (revision 19397407)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Welcome to the world of the "real mode platter".
 * See also startup.c, mpcore.s and apic.c for related routines.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/smp_impldefs.h>
#include <sys/sysmacros.h>
#include <sys/mach_mmu.h>
#include <sys/promif.h>
#include <sys/cpu.h>
#include <vm/hat_i86.h>

extern void real_mode_start(void);
extern void real_mode_end(void);
extern void *(*cpu_pause_func)(void *);

void rmp_gdt_init(rm_platter_t *);

/*
 * Fill up the real mode platter to make it easy for real mode code to
 * kick it off. This area should really be one passed by boot to the
 * kernel, guaranteed to be below 1MB and aligned to 16 bytes, and it
 * should also have identical physical and virtual addresses in paged mode.
 */
static ushort_t *warm_reset_vector = NULL;

int
mach_cpucontext_init(void)
{
	ushort_t *vec;

	if (!(vec = (ushort_t *)psm_map_phys(WARM_RESET_VECTOR,
	    sizeof (vec), PROT_READ | PROT_WRITE)))
		return (-1);
	/*
	 * setup secondary cpu bios boot up vector
	 */
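	/*
	 * The warm reset vector is a real-mode far pointer: word 0 holds
	 * the IP offset of rm_code within its 16-byte-aligned segment and
	 * word 1 holds the segment (the platter's physical address >> 4),
	 * so the freshly started CPU jumps into the copied real_mode_start
	 * code below 1MB.
	 */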
	*vec = (ushort_t)((caddr_t)
	    ((struct rm_platter *)rm_platter_va)->rm_code - rm_platter_va
	    + ((ulong_t)rm_platter_va & 0xf));
	vec[1] = (ushort_t)(rm_platter_pa >> 4);
	warm_reset_vector = vec;

	bcopy((caddr_t)real_mode_start,
	    (caddr_t)((rm_platter_t *)rm_platter_va)->rm_code,
	    (size_t)real_mode_end - (size_t)real_mode_start);

	return (0);
}

void
mach_cpucontext_fini(void)
{
	if (warm_reset_vector)
		psm_unmap_phys((caddr_t)warm_reset_vector,
		    sizeof (warm_reset_vector));
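	/*
	 * Unload the 1:1 (identity) mapping of the real mode platter page
	 * from the kernel address space now that CPU startup is done.
	 */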
	hat_unload(kas.a_hat, (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
	    HAT_UNLOAD);
}

#if defined(__amd64)
extern void *long_mode_64(void);
#endif	/* __amd64 */

void *
mach_cpucontext_alloc(struct cpu *cp)
{
	rm_platter_t *rm = (rm_platter_t *)rm_platter_va;
	struct cpu_tables *ct;
	struct tss *ntss;

	/*
	 * Allocate space for stack, tss, gdt and idt. We round the size
	 * allocated for cpu_tables up, so that the TSS is on a unique page.
	 * This is more efficient when running in virtual machines.
	 */
	ct = kmem_zalloc(P2ROUNDUP(sizeof (*ct), PAGESIZE), KM_SLEEP);
	if ((uintptr_t)ct & PAGEOFFSET)
		panic("mp_startup_init: cpu%d misaligned tables", cp->cpu_id);

	ntss = cp->cpu_tss = &ct->ct_tss;

#if defined(__amd64)

	/*
	 * #DF (double fault).
	 */
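	/*
	 * The top of ct_stack serves as the IST1 stack, so a double fault
	 * handler always has a known-good stack to switch to, even if the
	 * faulting context's stack pointer is bad.
	 */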
	ntss->tss_ist1 = (uint64_t)&ct->ct_stack[sizeof (ct->ct_stack)];

#elif defined(__i386)

	ntss->tss_esp0 = ntss->tss_esp1 = ntss->tss_esp2 = ntss->tss_esp =
	    (uint32_t)&ct->ct_stack[sizeof (ct->ct_stack)];

	ntss->tss_ss0 = ntss->tss_ss1 = ntss->tss_ss2 = ntss->tss_ss = KDS_SEL;

	ntss->tss_eip = (uint32_t)cp->cpu_thread->t_pc;

	ntss->tss_cs = KCS_SEL;
	ntss->tss_ds = ntss->tss_es = KDS_SEL;
	ntss->tss_fs = KFS_SEL;
	ntss->tss_gs = KGS_SEL;

#endif	/* __i386 */

	/*
	 * Set the I/O bit map offset equal to the size of the TSS segment,
	 * i.e. past the TSS limit, so that there is no I/O permission map.
	 * This will cause all user I/O instructions to generate a #gp fault.
	 */
	ntss->tss_bitmapbase = sizeof (*ntss);

	/*
	 * Setup kernel tss.
	 */
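	/*
	 * Install a system segment descriptor of type SDT_SYSTSS for this
	 * TSS in the GDT_KTSS slot of the new CPU's GDT, so that the task
	 * register can later be loaded with the kernel TSS selector.
	 */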
	set_syssegd((system_desc_t *)&cp->cpu_gdt[GDT_KTSS], cp->cpu_tss,
	    sizeof (*cp->cpu_tss) - 1, SDT_SYSTSS, SEL_KPL);

	/*
	 * Now copy all that we've set up onto the real mode platter
	 * for the real mode code to digest as part of starting the cpu.
	 */

	rm->rm_idt_base = cp->cpu_idt;
	rm->rm_idt_lim = sizeof (*cp->cpu_idt) * NIDT - 1;
	rm->rm_gdt_base = cp->cpu_gdt;
	rm->rm_gdt_lim = sizeof (*cp->cpu_gdt) * NGDT - 1;

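	/*
	 * Values the starting CPU picks up before it can switch to paged
	 * mode: the kernel's page table root for %cr3 (rm_pdbr) and the
	 * %cr4 settings currently in use on this CPU.
	 */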
	rm->rm_pdbr = getcr3();
	rm->rm_cpu = cp->cpu_id;
	rm->rm_x86feature = x86_feature;
	rm->rm_cr4 = getcr4();

	rmp_gdt_init(rm);

	return (ct);
}

/*ARGSUSED*/
void
rmp_gdt_init(rm_platter_t *rm)
{

#if defined(__amd64)

	if (getcr3() > 0xffffffffUL)
		panic("Cannot initialize CPUs; kernel's 64-bit page tables\n"
		    "located above 4G in physical memory (@ 0x%lx)", getcr3());

	/*
	 * Setup pseudo-descriptors for temporary GDT and IDT for use ONLY
	 * by code in real_mode_start():
	 *
	 * GDT[0]:  NULL selector
	 * GDT[1]:  64-bit CS: Long = 1, Present = 1, bits 12, 11 = 1
	 *
	 * Clear the IDT as interrupts will be off and a limit of 0 will cause
	 * the CPU to triple fault and reset on an NMI, seemingly as reasonable
	 * a course of action as any other, though it may cause the entire
	 * platform to reset in some cases...
	 */
	rm->rm_temp_gdt[0] = 0;
	rm->rm_temp_gdt[TEMPGDT_KCODE64] = 0x20980000000000ULL;
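	/*
	 * 0x0020980000000000 decodes to a long-mode code descriptor:
	 * base and limit 0 (ignored for 64-bit code), type = execute-only
	 * code, S = 1, DPL = 0, P = 1, L = 1, D = 0.
	 */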

	rm->rm_temp_gdt_lim = (ushort_t)(sizeof (rm->rm_temp_gdt) - 1);
	rm->rm_temp_gdt_base = rm_platter_pa +
	    (uint32_t)offsetof(rm_platter_t, rm_temp_gdt);
	rm->rm_temp_idt_lim = 0;
	rm->rm_temp_idt_base = 0;

	/*
	 * Since the CPU needs to jump to protected mode using an identity
	 * mapped address, we need to calculate it here.
	 */
	rm->rm_longmode64_addr = rm_platter_pa +
	    ((uint32_t)long_mode_64 - (uint32_t)real_mode_start);
#endif	/* __amd64 */
}

/*ARGSUSED*/
void
mach_cpucontext_free(struct cpu *cp, void *arg, int err)
{
	struct cpu_tables *ct = arg;

	ASSERT(&ct->ct_tss == cp->cpu_tss);

	switch (err) {
	case 0:
		break;
	case ETIMEDOUT:
		/*
		 * The processor was poked, but failed to start before
		 * we gave up waiting for it.  In case it starts later,
		 * don't free anything.
		 */
		break;
	default:
		/*
		 * Some other, passive, error occurred.
		 */
		kmem_free(ct, P2ROUNDUP(sizeof (*ct), PAGESIZE));
		cp->cpu_tss = NULL;
		break;
	}
}

/*
 * "Enter monitor."  Called via cross-call from stop_other_cpus().
 */
void
mach_cpu_halt(char *msg)
{
	if (msg)
		prom_printf("%s\n", msg);

	/*CONSTANTCONDITION*/
	while (1)
		;
}

void
mach_cpu_idle(void)
{
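	/*
	 * Tell the VM layer this CPU is going idle so TLB invalidations
	 * aimed at it can be deferred, halt until an interrupt arrives,
	 * then service any TLB flush requests queued while we were halted.
	 */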
	tlb_going_idle();
	i86_halt();
	tlb_service();
}

void
mach_cpu_pause(volatile char *safe)
{
	/*
	 * This cpu is now safe.
	 */
	*safe = PAUSE_WAIT;
	membar_enter(); /* make sure stores are flushed */

	/*
	 * Now we wait.  When we are allowed to continue, safe
	 * will be set to PAUSE_IDLE.
	 */
	while (*safe != PAUSE_IDLE)
		SMT_PAUSE();
}

/*
 * Power on CPU.
 */
/*ARGSUSED*/
int
mp_cpu_poweron(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (ENOTSUP);		/* not supported */
}

/*
 * Power off CPU.
 */
/*ARGSUSED*/
int
mp_cpu_poweroff(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (ENOTSUP);		/* not supported */
}

/*
 * Return vcpu state.  Since this could be a virtual environment that we
 * are unaware of, return "unknown".
 */
/* ARGSUSED */
int
vcpu_on_pcpu(processorid_t cpu)
{
	return (VCPU_STATE_UNKNOWN);
}