xref: /freebsd/sys/amd64/amd64/mpboot.S (revision c1d255d3)
/*-
 * Copyright (c) 2003 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <machine/asmacros.h>		/* miscellaneous asm macros */
#include <machine/specialreg.h>

#include "assym.inc"

	.data				/* So we can modify it */

	.p2align 4,0
	.globl	mptramp_start
mptramp_start:
	.code16
	/*
	 * The AP enters here in response to the startup IPI.
	 * We are in real mode.  %cs is the only segment register set.
	 */
	cli				/* make sure no interrupts */
	mov	%cs, %ax		/* copy %cs to %ds.  Remember these */
	mov	%ax, %ds		/* are offsets rather than selectors */
	mov	%ax, %ss

	/*
	 * Find relocation base and patch the gdt descriptor and ljmp targets
	 */
	xorl	%ebx, %ebx
	mov	%cs, %bx
	sall	$4, %ebx		/* %ebx is now our relocation base */
	orl	%ebx, lgdt_desc-mptramp_start+2
	orl	%ebx, jmp_32-mptramp_start+2
	orl	%ebx, jmp_64-mptramp_start+1

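	/*
	 * For example: a startup IPI with vector V starts the AP at
	 * CS:IP = (V << 8):0, i.e. at physical address V << 12.  With
	 * vector 0x09, %cs = 0x0900 and %ebx = 0x0900 << 4 = 0x9000.
	 * Since that base is 4 KiB aligned and each patched field holds a
	 * small offset into this trampoline (well under 4 KiB), the "orl"
	 * above behaves exactly like an add.
	 */
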
	/*
	 * Load the descriptor table pointer.  We'll need it when running
	 * in 16 bit protected mode.
	 */
	lgdt	lgdt_desc-mptramp_start

	/* Enable protected mode */
	movl	$CR0_PE, %eax
	mov	%eax, %cr0

	/*
	 * Now execute a far jump to turn on protected mode.  This
	 * causes the segment registers to turn into selectors and causes
	 * %cs to be loaded from the gdt.
	 *
	 * The following instruction is:
	 * ljmpl $bootcode-gdt, $protmode-mptramp_start
	 * but gas cannot assemble that.  And besides, we patch the targets
	 * in early startup and it's a little clearer what we are patching.
	 */
jmp_32:
	.byte	0x66			/* size override to 32 bits */
	.byte	0xea			/* opcode for far jump */
	.long	protmode-mptramp_start	/* offset in segment */
	.word	bootcode-gdt		/* index in gdt for 32 bit code */

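	/*
	 * Decoding the bytes above: the 0x66 operand-size override makes
	 * the 0xea far jump (JMP ptr16:32) take a 32 bit offset even in
	 * 16 bit mode, so the instruction is laid out as prefix, opcode,
	 * 4-byte offset, 2-byte selector.  The offset field therefore
	 * sits at jmp_32+2, which is exactly where the "orl" patch above
	 * or'ed in the relocation base.
	 */
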
	/*
	 * At this point, we are running in 32 bit legacy protected mode.
	 */
	.code32
protmode:
	mov	$bootdata-gdt, %eax
	mov	%ax, %ds

	/*
	 * Turn on the PAE bit and optionally the LA57 bit for when paging
	 * is later enabled.
	 */
	mov	%cr4, %eax
	orl	$(CR4_PAE | CR4_PGE), %eax
	cmpb	$0, mptramp_la57-mptramp_start(%ebx)
	je	1f
	orl	$CR4_LA57, %eax
1:	mov	%eax, %cr4

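	/*
	 * For reference (architectural bit positions, not symbols defined
	 * in this file): CR4.PAE is bit 5, CR4.PGE is bit 7 and CR4.LA57
	 * is bit 12.  Long mode requires PAE paging, so CR4.PAE must be
	 * set before CR0.PG is enabled below.
	 */
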
	/*
	 * If the BSP reported NXE support, enable EFER.NXE for all APs
	 * prior to loading %cr3.  This avoids page faults if the AP
	 * encounters memory marked with the NX bit prior to detecting and
	 * enabling NXE support.
	 */
	cmpb	$0, mptramp_nx-mptramp_start(%ebx)
	je	2f
	movl	$MSR_EFER, %ecx
	rdmsr
	orl	$EFER_NXE, %eax
	wrmsr
2:
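	/*
	 * rdmsr/wrmsr convention: %ecx selects the MSR and the 64 bit
	 * value travels in %edx:%eax.  EFER is MSR 0xc0000080; for
	 * reference, NXE is bit 11, LME bit 8 and SCE bit 0.
	 */
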
	/*
	 * Enable EFER.LME so that we get long mode when all the prereqs are
	 * in place.  In this case, it turns on when CR0_PG is finally enabled.
	 * Pick up a few other EFER bits that we'll need while we're here.
	 */
	movl	$MSR_EFER, %ecx
	rdmsr
	orl	$EFER_LME | EFER_SCE, %eax
	wrmsr

	/*
	 * Load kernel page table pointer into %cr3.
	 * %ebx is still our relocation base.
	 *
	 * Note that this only gets accessed after we're actually in 64 bit
	 * mode, however we can only set the bottom 32 bits of %cr3 in this
	 * state.  This means we depend on the kernel page table being
	 * allocated from the low 4G.
	 */
	leal	mptramp_pagetables-mptramp_start(%ebx), %eax
	movl	(%eax), %eax
	mov	%eax, %cr3

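	/*
	 * On the addressing above: the flat bootdata segment now in %ds
	 * has base 0, so the link-time offset of mptramp_pagetables plus
	 * the relocation base in %ebx is the variable's physical address
	 * (linear == physical while paging is still off); "leal" simply
	 * performs that add.
	 */
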
	/*
	 * Finally, switch to long mode by enabling paging.  We have
	 * to be very careful here because all the segmentation disappears
	 * out from underneath us.  The spec says we can depend on the
	 * subsequent pipelined branch to execute, but *only if* everything
	 * is still identity mapped.  If any mappings change, the pipeline
	 * will flush.
	 */
	mov	%cr0, %eax
	orl	$CR0_PG, %eax
	mov	%eax, %cr0

	/*
	 * At this point paging is enabled, and we are in "compatibility" mode.
	 * We do another far jump to reload %cs with the 64 bit selector.
	 * %cr3 points to a 4- or 5-level page table.
	 * We cannot yet jump all the way to the kernel because we can only
	 * specify a 32 bit linear address.  So, we use yet another trampoline.
	 *
	 * The following instruction is:
	 * ljmp $kernelcode-gdt, $tramp_64-mptramp_start
	 * but gas cannot assemble that.  And besides, we patch the targets
	 * in early startup and it's a little clearer what we are patching.
	 */
jmp_64:
	.byte	0xea			/* opcode for far jump */
	.long	tramp_64-mptramp_start	/* offset in segment */
	.word	kernelcode-gdt		/* index in gdt for 64 bit code */

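	/*
	 * Same far-jump encoding as jmp_32, minus the 0x66 prefix: in a
	 * 32 bit code segment 0xea already takes a 32 bit offset and a
	 * 16 bit selector.  That is why the patch at the top targets
	 * jmp_64+1 (the offset starts right after the opcode) rather
	 * than +2.
	 */
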
	/*
	 * Yeehar!  We're running in 64 bit mode!  We can mostly ignore our
	 * segment registers, and get on with it.
	 * We are running in the correct virtual address space.
	 * Note that the jmp is relative and that we've been relocated,
	 * so use an indirect jump.
	 */
	.code64
tramp_64:
	movabsq	$entry_64, %rax		/* 64 bit immediate load */
	jmp	*%rax

	.p2align 4,0
gdt:
	/*
	 * All segment descriptor tables start with a null descriptor
	 */
	.long	0x00000000
	.long	0x00000000

	/*
	 * This is the 64 bit long mode code descriptor.  There is no
	 * 64 bit data descriptor.
	 */
kernelcode:
	.long	0x00000000
	.long	0x00209800

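	/*
	 * Decoding 0x00209800: P=1, DPL=0, S=1, type=1000b (execute-only
	 * code) and, crucially, L=1 (bit 21 of the second dword) with
	 * D=0, which marks a 64 bit code segment.  Base and limit are
	 * ignored in long mode, so they are left as zero.
	 */
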
	/*
	 * This is the descriptor for the 32 bit boot code.
	 * %cs:  +A, +R, -C, DPL=0, +P, +D, +G
	 * Accessed, Readable, Present, 32 bit, 4G granularity
	 */
bootcode:
	.long	0x0000ffff
	.long	0x00cf9b00

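	/*
	 * Decoding 0x0000ffff/0x00cf9b00: base 0 and limit 0xfffff with
	 * G=1 (4 KiB granularity) cover the full 4 GiB; 0x9b gives P=1,
	 * DPL=0, S=1, type=1011b (code, readable, accessed); D=1 selects
	 * a 32 bit default operand size.
	 */
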
	/*
	 * This is the descriptor for the 32 bit boot data.
	 * We load it into %ds and %ss.  The bits for each selector
	 * are interpreted slightly differently.
	 * %ds:  +A, +W, -E, DPL=0, +P, +D, +G
	 * %ss:  +A, +W, -E, DPL=0, +P, +B, +G
	 * Accessed, Writeable, Expand up, Present, 32 bit, 4GB
	 * For %ds, +D means 'default operand size is 32 bit'.
	 * For %ss, +B means the stack register is %esp rather than %sp.
	 */
bootdata:
	.long	0x0000ffff
	.long	0x00cf9300

gdtend:

	/*
	 * The address of our page table pages that the boot code
	 * uses to trampoline up to kernel address space.
	 */
	.globl	mptramp_pagetables
mptramp_pagetables:
	.long	0

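	/*
	 * Presumably the BSP stores the physical address of the top-level
	 * page table page (PML4, or PML5 with LA57) here before sending
	 * the startup IPIs; the exact spot lives in the MD MP startup
	 * code (mp_machdep.c), not in this file.
	 */
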
	/* 5-level paging? */
	.globl	mptramp_la57
mptramp_la57:
	.long	0

	.globl	mptramp_nx
mptramp_nx:
	.long	0

	/*
	 * The pseudo descriptor for lgdt to use.
	 */
lgdt_desc:
	.word	gdtend-gdt		/* Length */
	.long	gdt-mptramp_start	/* Offset plus %ds << 4 */

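	/*
	 * Layout note: a pseudo descriptor is a 16 bit limit followed by
	 * the table's linear base address (the architected limit is
	 * size-1, so "gdtend-gdt" is one byte generous, which is
	 * harmless).  The base is assembled as an offset from
	 * mptramp_start; the "orl" at entry patches the relocation base
	 * into lgdt_desc+2, i.e. into this .long, before lgdt runs.
	 */
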
mptramp_end:
	/*
	 * The size of the trampoline code that needs to be relocated
	 * below the 1MiB boundary.
	 */
	.globl	bootMP_size
bootMP_size:
	.long	mptramp_end - mptramp_start

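	/*
	 * bootMP_size is meant to let the MP startup code size the copy
	 * of this trampoline that it places in low memory: a startup IPI
	 * can only start an AP at a 4 KiB-aligned physical address below
	 * 1 MiB, so the whole mptramp_start..mptramp_end range must fit
	 * there.
	 */
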
	/*
	 * From here on down is executed in the kernel .text section.
	 */
	.text
	.code64
	.p2align 4,0
entry_64:
	movq	bootSTK, %rsp

	/*
	 * Initialize the segment register used for the PCPU area.  The PCPU
	 * area will be initialized by init_secondary(), but it should be
	 * accessible before that to support sanitizer instrumentation which
	 * accesses per-CPU variables.
	 *
	 * Note that GS.base is loaded again in init_secondary().  This is not
	 * redundant: lgdt() loads a selector into %gs and this has the side
	 * effect of clearing GS.base.
	 */
	movl	$MSR_GSBASE, %ecx
	movq	bootpcpu, %rax
	movq	%rax, %rdx
	shrq	$32, %rdx
	wrmsr
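
	/*
	 * wrmsr takes the value in %edx:%eax, so the 64 bit pointer is
	 * split: e.g. with bootpcpu = 0xffffffff81234000 (a made-up
	 * kernel address), %eax holds 0x81234000 and, after the shift,
	 * %edx holds 0xffffffff.  MSR_GSBASE is MSR 0xc0000101.
	 */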

	jmp	init_secondary