xref: /openbsd/sys/arch/amd64/amd64/locore.S (revision 55fdb5fa)
1*55fdb5faSguenther/*	$OpenBSD: locore.S,v 1.136 2023/07/10 03:32:10 guenther Exp $	*/
2b5b9857bSart/*	$NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $	*/
3f5df1827Smickey
4f5df1827Smickey/*
5f5df1827Smickey * Copyright-o-rama!
6f5df1827Smickey */
7f5df1827Smickey
8f5df1827Smickey/*
9f5df1827Smickey * Copyright (c) 2001 Wasabi Systems, Inc.
10f5df1827Smickey * All rights reserved.
11f5df1827Smickey *
12f5df1827Smickey * Written by Frank van der Linden for Wasabi Systems, Inc.
13f5df1827Smickey *
14f5df1827Smickey * Redistribution and use in source and binary forms, with or without
15f5df1827Smickey * modification, are permitted provided that the following conditions
16f5df1827Smickey * are met:
17f5df1827Smickey * 1. Redistributions of source code must retain the above copyright
18f5df1827Smickey *    notice, this list of conditions and the following disclaimer.
19f5df1827Smickey * 2. Redistributions in binary form must reproduce the above copyright
20f5df1827Smickey *    notice, this list of conditions and the following disclaimer in the
21f5df1827Smickey *    documentation and/or other materials provided with the distribution.
22f5df1827Smickey * 3. All advertising materials mentioning features or use of this software
23f5df1827Smickey *    must display the following acknowledgement:
24f5df1827Smickey *      This product includes software developed for the NetBSD Project by
25f5df1827Smickey *      Wasabi Systems, Inc.
26f5df1827Smickey * 4. The name of Wasabi Systems, Inc. may not be used to endorse
27f5df1827Smickey *    or promote products derived from this software without specific prior
28f5df1827Smickey *    written permission.
29f5df1827Smickey *
30f5df1827Smickey * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
31f5df1827Smickey * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
32f5df1827Smickey * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
33f5df1827Smickey * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
34f5df1827Smickey * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
35f5df1827Smickey * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36f5df1827Smickey * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
37f5df1827Smickey * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
38f5df1827Smickey * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39f5df1827Smickey * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
40f5df1827Smickey * POSSIBILITY OF SUCH DAMAGE.
41f5df1827Smickey */
42f5df1827Smickey
43f5df1827Smickey
44f5df1827Smickey/*-
45f5df1827Smickey * Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
46f5df1827Smickey * All rights reserved.
47f5df1827Smickey *
48f5df1827Smickey * This code is derived from software contributed to The NetBSD Foundation
49f5df1827Smickey * by Charles M. Hannum.
50f5df1827Smickey *
51f5df1827Smickey * Redistribution and use in source and binary forms, with or without
52f5df1827Smickey * modification, are permitted provided that the following conditions
53f5df1827Smickey * are met:
54f5df1827Smickey * 1. Redistributions of source code must retain the above copyright
55f5df1827Smickey *    notice, this list of conditions and the following disclaimer.
56f5df1827Smickey * 2. Redistributions in binary form must reproduce the above copyright
57f5df1827Smickey *    notice, this list of conditions and the following disclaimer in the
58f5df1827Smickey *    documentation and/or other materials provided with the distribution.
59f5df1827Smickey *
60f5df1827Smickey * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
61f5df1827Smickey * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
62f5df1827Smickey * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
63f5df1827Smickey * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
64f5df1827Smickey * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
65f5df1827Smickey * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
66f5df1827Smickey * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
67f5df1827Smickey * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
68f5df1827Smickey * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
69f5df1827Smickey * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
70f5df1827Smickey * POSSIBILITY OF SUCH DAMAGE.
71f5df1827Smickey */
72f5df1827Smickey
73f5df1827Smickey/*-
74f5df1827Smickey * Copyright (c) 1990 The Regents of the University of California.
75f5df1827Smickey * All rights reserved.
76f5df1827Smickey *
77f5df1827Smickey * This code is derived from software contributed to Berkeley by
78f5df1827Smickey * William Jolitz.
79f5df1827Smickey *
80f5df1827Smickey * Redistribution and use in source and binary forms, with or without
81f5df1827Smickey * modification, are permitted provided that the following conditions
82f5df1827Smickey * are met:
83f5df1827Smickey * 1. Redistributions of source code must retain the above copyright
84f5df1827Smickey *    notice, this list of conditions and the following disclaimer.
85f5df1827Smickey * 2. Redistributions in binary form must reproduce the above copyright
86f5df1827Smickey *    notice, this list of conditions and the following disclaimer in the
87f5df1827Smickey *    documentation and/or other materials provided with the distribution.
88b5b9857bSart * 3. Neither the name of the University nor the names of its contributors
89f5df1827Smickey *    may be used to endorse or promote products derived from this software
90f5df1827Smickey *    without specific prior written permission.
91f5df1827Smickey *
92f5df1827Smickey * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
93f5df1827Smickey * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
94f5df1827Smickey * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
95f5df1827Smickey * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
96f5df1827Smickey * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
97f5df1827Smickey * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
98f5df1827Smickey * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
99f5df1827Smickey * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
100f5df1827Smickey * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
101f5df1827Smickey * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
102f5df1827Smickey * SUCH DAMAGE.
103f5df1827Smickey *
104f5df1827Smickey *	@(#)locore.s	7.3 (Berkeley) 5/13/91
105f5df1827Smickey */
106f5df1827Smickey
107f5df1827Smickey#include "assym.h"
108f3c5c958Skettenis#include "efi.h"
109f5df1827Smickey#include "lapic.h"
110f5df1827Smickey#include "ksyms.h"
111d8213a49Smikeb#include "xen.h"
112218ead0bSmikeb#include "hyperv.h"
113f5df1827Smickey
114f5df1827Smickey#include <sys/syscall.h>
115f5df1827Smickey
116f5df1827Smickey#include <machine/param.h>
117c9de630fSguenther#include <machine/codepatch.h>
118b767b017Sguenther#include <machine/psl.h>
119f5df1827Smickey#include <machine/segments.h>
120f5df1827Smickey#include <machine/specialreg.h>
121c9de630fSguenther#include <machine/trap.h>			/* T_PROTFLT */
122f5df1827Smickey#include <machine/frameasm.h>
123f5df1827Smickey
124f5df1827Smickey#if NLAPIC > 0
125f5df1827Smickey#include <machine/i82489reg.h>
126f5df1827Smickey#endif
127f5df1827Smickey
128f5df1827Smickey/*
129f5df1827Smickey * override user-land alignment before including asm.h
130f5df1827Smickey */
131cb5172cdSderaadt#define	ALIGN_DATA	.align	8,0xcc
132f5df1827Smickey
133f5df1827Smickey#include <machine/asm.h>
134f5df1827Smickey
/*
 * SET_CURPROC(proc,cpu): make "proc" the proc running on this CPU.
 * Loads curcpu's self pointer into "cpu", stores "proc" into
 * ci_curproc, and points proc's p_cpu back at this CPU.
 * Clobbers the "cpu" register.
 */
135fbe53cacSkrw#define SET_CURPROC(proc,cpu)			\
136fbe53cacSkrw	movq	CPUVAR(SELF),cpu	;	\
137fbe53cacSkrw	movq	proc,CPUVAR(CURPROC)      ;	\
138fbe53cacSkrw	movq	cpu,P_CPU(proc)
139fbe53cacSkrw
/* Read/write curcpu's ci_curpcb (current process control block). */
140fbe53cacSkrw#define GET_CURPCB(reg)			movq	CPUVAR(CURPCB),reg
141fbe53cacSkrw#define SET_CURPCB(reg)			movq	reg,CPUVAR(CURPCB)
142fbe53cacSkrw
143fbe53cacSkrw
144f5df1827Smickey/*
145f5df1827Smickey * Initialization
146f5df1827Smickey */
147f5df1827Smickey	.data
148f5df1827Smickey
149f5df1827Smickey#if NLAPIC > 0
/*
 * Page-aligned window for the local APIC registers; the labels mark
 * the LAPIC_* register offsets within the page so they can be
 * addressed symbolically (presumably remapped to the real APIC at
 * boot — confirm in lapic init code).
 */
1500175496dSderaadt	.align	NBPG, 0xcc
1514ce05526Sguenther	.globl	local_apic, lapic_id, lapic_tpr
1524ce05526Sguentherlocal_apic:
153f5df1827Smickey	.space	LAPIC_ID
1544ce05526Sguentherlapic_id:
155f5df1827Smickey	.long	0x00000000
156f5df1827Smickey	.space	LAPIC_TPRI-(LAPIC_ID+4)
1574ce05526Sguentherlapic_tpr:
158f5df1827Smickey	.space	LAPIC_PPRI-LAPIC_TPRI
1594ce05526Sguentherlapic_ppr:
160f5df1827Smickey	.space	LAPIC_ISR-LAPIC_PPRI
1614ce05526Sguentherlapic_isr:
162f5df1827Smickey	.space	NBPG-LAPIC_ISR
163f5df1827Smickey#endif
164f5df1827Smickey
165f5df1827Smickey/*****************************************************************************/
166f5df1827Smickey
167f5df1827Smickey/*
168b767b017Sguenther * Signal trampoline; copied to a page mapped into userspace.
169aa7a0a27Sguenther * gdb's backtrace logic matches against the instructions in this.
170f5df1827Smickey */
171b983598cSderaadt	.section .rodata
1724ce05526Sguenther	.globl	sigcode
1734ce05526Sguenthersigcode:
174339eb9d2Sderaadt	endbr64
	/* jump to the signal handler (address in %rax) via the retpoline at 1: */
175421775b1Sguenther	call	1f
	/* handler returned: %rsp points at the sigcontext; pass it to sigreturn */
176f5df1827Smickey	movq	%rsp,%rdi
177f5df1827Smickey	pushq	%rdi			/* fake return address */
178f5df1827Smickey	movq	$SYS_sigreturn,%rax
1791396572dSguenther	syscall
1804ce05526Sguenther	.globl	sigcoderet
1814ce05526Sguenthersigcoderet:
	/* sigreturn must not return; if it does, exit the process */
182f5df1827Smickey	movq	$SYS_exit,%rax
183f5df1827Smickey	syscall
184b433e1a0Sguenther	_ALIGN_TRAPS
185421775b1Sguenther1:	JMP_RETPOLINE(rax)
1864ce05526Sguenther	.globl	esigcode
1874ce05526Sguentheresigcode:
188f5df1827Smickey
	/* filler pattern (int3) used to pad the signal trampoline page */
1894ce05526Sguenther	.globl	sigfill
1904ce05526Sguenthersigfill:
191b983598cSderaadt	int3
1924ce05526Sguentheresigfill:
1934ce05526Sguenther	.globl	sigfillsiz
1944ce05526Sguenthersigfillsiz:
1954ce05526Sguenther	.long	esigfill - sigfill
196b983598cSderaadt
197b983598cSderaadt	.text
198f5df1827Smickey/*
199f5df1827Smickey * void lgdt(struct region_descriptor *rdp);
200f5df1827Smickey * Change the global descriptor table.
201f5df1827Smickey */
202f5df1827SmickeyNENTRY(lgdt)
203db0a8dc5Smortimer	RETGUARD_SETUP(lgdt, r11)
204f5df1827Smickey	/* Reload the descriptor table. */
205f5df1827Smickey	movq	%rdi,%rax
206f5df1827Smickey	lgdt	(%rax)
207f5df1827Smickey	/* Flush the prefetch q. */
208f5df1827Smickey	jmp	1f
209f5df1827Smickey	nop
210f5df1827Smickey1:	/* Reload "stale" selectors. */
211f5df1827Smickey	movl	$GSEL(GDATA_SEL, SEL_KPL),%eax
212f5df1827Smickey	movl	%eax,%ds
213f5df1827Smickey	movl	%eax,%es
214f5df1827Smickey	movl	%eax,%ss
215f5df1827Smickey	/* Reload code selector by doing intersegment return. */
	/* pop saved return %rip, then push new %cs:%rip for lretq */
216f5df1827Smickey	popq	%rax
217f5df1827Smickey	pushq	$GSEL(GCODE_SEL, SEL_KPL)
218f5df1827Smickey	pushq	%rax
219db0a8dc5Smortimer	RETGUARD_CHECK(lgdt, r11)
220f5df1827Smickey	lretq
221a324dee9SguentherEND(lgdt)
222f5df1827Smickey
223f3c5c958Skettenis#if defined(DDB) || NEFI > 0
/*
 * int setjmp(label_t *jb): save callee-saved registers, %rsp and the
 * return %rip into the 8-quadword buffer at %rdi; returns 0 directly
 * (longjmp later returns 1 at the same point).
 */
224f5df1827SmickeyENTRY(setjmp)
2250c4bf09dSmortimer	RETGUARD_SETUP(setjmp, r11)
226f5df1827Smickey	/*
227f5df1827Smickey	 * Only save registers that must be preserved across function
228f5df1827Smickey	 * calls according to the ABI (%rbx, %rsp, %rbp, %r12-%r15)
229f5df1827Smickey	 * and %rip.
230f5df1827Smickey	 */
231f5df1827Smickey	movq	%rdi,%rax
232f5df1827Smickey	movq	%rbx,(%rax)
233f5df1827Smickey	movq	%rsp,8(%rax)
234f5df1827Smickey	movq	%rbp,16(%rax)
235f5df1827Smickey	movq	%r12,24(%rax)
236f5df1827Smickey	movq	%r13,32(%rax)
237f5df1827Smickey	movq	%r14,40(%rax)
238f5df1827Smickey	movq	%r15,48(%rax)
	/* save caller's return address (top of stack) as the resume %rip */
239f5df1827Smickey	movq	(%rsp),%rdx
240f5df1827Smickey	movq	%rdx,56(%rax)
241f5df1827Smickey	xorl	%eax,%eax
2420c4bf09dSmortimer	RETGUARD_CHECK(setjmp, r11)
243f5df1827Smickey	ret
2443dd0809fSbluhm	lfence
245a324dee9SguentherEND(setjmp)
246f5df1827Smickey
/*
 * void longjmp(label_t *jb): restore the context saved by setjmp and
 * resume after that setjmp call with return value 1.  Overwrites the
 * saved %rip onto the restored stack before returning through it.
 */
247f5df1827SmickeyENTRY(longjmp)
248f5df1827Smickey	movq	%rdi,%rax
249f5df1827Smickey	movq	8(%rax),%rsp
2500c4bf09dSmortimer	movq	56(%rax),%rdx
2510c4bf09dSmortimer	movq	%rdx,(%rsp)
2520c4bf09dSmortimer	RETGUARD_SETUP(longjmp, r11)
2530c4bf09dSmortimer	movq	(%rax),%rbx
254f5df1827Smickey	movq	16(%rax),%rbp
255f5df1827Smickey	movq	24(%rax),%r12
256f5df1827Smickey	movq	32(%rax),%r13
257f5df1827Smickey	movq	40(%rax),%r14
258f5df1827Smickey	movq	48(%rax),%r15
	/* return 1 so the setjmp site can tell it came from longjmp */
259f5df1827Smickey	xorl	%eax,%eax
260f5df1827Smickey	incl	%eax
2610c4bf09dSmortimer	RETGUARD_CHECK(longjmp, r11)
262f5df1827Smickey	ret
2633dd0809fSbluhm	lfence
264a324dee9SguentherEND(longjmp)
265f3c5c958Skettenis#endif /* DDB || NEFI > 0 */
266f5df1827Smickey
267f5df1827Smickey/*****************************************************************************/
268f5df1827Smickey
269f5df1827Smickey/*
27045053f4aSart * int cpu_switchto(struct proc *old, struct proc *new)
27145053f4aSart * Switch from "old" proc to "new".
272f5df1827Smickey */
27345053f4aSartENTRY(cpu_switchto)
	/* save caller's callee-saved registers on the old kernel stack */
274f5df1827Smickey	pushq	%rbx
275f5df1827Smickey	pushq	%rbp
276f5df1827Smickey	pushq	%r12
277f5df1827Smickey	pushq	%r13
278f5df1827Smickey	pushq	%r14
279f5df1827Smickey	pushq	%r15
280f5df1827Smickey
	/* %r13 = old proc (may be NULL), %r12 = new proc */
281fbe53cacSkrw	movq	%rdi, %r13
282fbe53cacSkrw	movq	%rsi, %r12
283fbe53cacSkrw
28460854cb9Sguenther	/* Record new proc. */
285fbe53cacSkrw	movb	$SONPROC,P_STAT(%r12)	# p->p_stat = SONPROC
286fbe53cacSkrw	SET_CURPROC(%r12,%rcx)
28760854cb9Sguenther
288c9de630fSguenther	movl	CPUVAR(CPUID),%r9d
289c9de630fSguenther
290c9de630fSguenther	/* for the FPU/"extended CPU state" handling below */
	/* split xsave_mask into %edx:%eax as expected by xsave/xrstor */
291c9de630fSguenther	movq	xsave_mask(%rip),%rdx
292c9de630fSguenther	movl	%edx,%eax
293c9de630fSguenther	shrq	$32,%rdx
294fd94711fSguenther
295fbe53cacSkrw	/* If old proc exited, don't bother. */
	/* %rcx = old pmap, defaults to NULL when there's no old proc */
2969f1181d5Sguenther	xorl	%ecx,%ecx
297fbe53cacSkrw	testq	%r13,%r13
298f5df1827Smickey	jz	switch_exited
299f5df1827Smickey
300fbe53cacSkrw	/*
301fbe53cacSkrw	 * Save old context.
302fbe53cacSkrw	 *
303fbe53cacSkrw	 * Registers:
3049f1181d5Sguenther	 *   %rax - scratch
305fbe53cacSkrw	 *   %r13 - old proc, then old pcb
3069f1181d5Sguenther	 *   %rcx - old pmap if not P_SYSTEM
307fbe53cacSkrw	 *   %r12 - new proc
308c9de630fSguenther	 *   %r9d - cpuid
309fbe53cacSkrw	 */
310fbe53cacSkrw
3119f1181d5Sguenther	/* remember the pmap if not P_SYSTEM */
3129f1181d5Sguenther	testl	$P_SYSTEM,P_FLAG(%r13)
313fbe53cacSkrw	movq	P_ADDR(%r13),%r13
3149f1181d5Sguenther	jnz	0f
315fd94711fSguenther	movq	PCB_PMAP(%r13),%rcx
3169f1181d5Sguenther0:
317fd94711fSguenther
318f5df1827Smickey	/* Save stack pointers. */
319f5df1827Smickey	movq	%rsp,PCB_RSP(%r13)
320f5df1827Smickey	movq	%rbp,PCB_RBP(%r13)
321fbe53cacSkrw
322c9de630fSguenther	/*
323c9de630fSguenther	 * If the old proc ran in userspace then save the
324c9de630fSguenther	 * floating-point/"extended state" registers
325c9de630fSguenther	 */
3260403d5bcSguenther	testl	$CPUPF_USERXSTATE,CPUVAR(PFLAGS)
327c9de630fSguenther	jz	.Lxstate_reset
328c9de630fSguenther
329c9de630fSguenther	movq	%r13, %rdi
330c9de630fSguenther#if PCB_SAVEFPU != 0
331c9de630fSguenther	addq	$PCB_SAVEFPU,%rdi
332c9de630fSguenther#endif
	/* codepatched at boot to xsave/xsaves when the CPU supports it */
333c9de630fSguenther	CODEPATCH_START
334ae97d4fcSguenther	fxsave64	(%rdi)
335c9de630fSguenther	CODEPATCH_END(CPTAG_XSAVE)
336c9de630fSguenther
337f5df1827Smickeyswitch_exited:
338c9de630fSguenther	/* now clear the xstate */
	/* reload the clean proc0 FPU state so nothing leaks between procs */
339c9de630fSguenther	movq	proc0paddr(%rip),%rdi
340c9de630fSguenther#if PCB_SAVEFPU != 0
341c9de630fSguenther	addq	$PCB_SAVEFPU,%rdi
342c9de630fSguenther#endif
343c9de630fSguenther	CODEPATCH_START
344ae97d4fcSguenther	fxrstor64	(%rdi)
345*55fdb5faSguenther	CODEPATCH_END(CPTAG_XRSTORS)
3460403d5bcSguenther	andl	$~CPUPF_USERXSTATE,CPUVAR(PFLAGS)
347c9de630fSguenther
348c9de630fSguenther.Lxstate_reset:
349c9de630fSguenther	/*
350c9de630fSguenther	 * If the segment registers haven't been reset since the old proc
351c9de630fSguenther	 * ran in userspace then reset them now
352c9de630fSguenther	 */
3530403d5bcSguenther	testl	$CPUPF_USERSEGS,CPUVAR(PFLAGS)
354c9de630fSguenther	jz	restore_saved
3550403d5bcSguenther	andl	$~CPUPF_USERSEGS,CPUVAR(PFLAGS)
356b13138f2Sguenther
35799c80879Sguenther	/* set %ds, %es, %fs, and %gs to expected value to prevent info leak */
358b13138f2Sguenther	movw	$(GSEL(GUDATA_SEL, SEL_UPL)),%ax
359b13138f2Sguenther	movw	%ax,%ds
360b13138f2Sguenther	movw	%ax,%es
361b13138f2Sguenther	movw	%ax,%fs
36299c80879Sguenther	cli			/* block interrupts when on user GS.base */
36399c80879Sguenther	swapgs			/* switch from kernel to user GS.base */
36499c80879Sguenther	movw	%ax,%gs		/* set %gs to UDATA and GS.base to 0 */
36599c80879Sguenther	swapgs			/* back to kernel GS.base */
366b13138f2Sguenther
367b13138f2Sguentherrestore_saved:
368f5df1827Smickey	/*
36945053f4aSart	 * Restore saved context.
370f5df1827Smickey	 *
371f5df1827Smickey	 * Registers:
3729f1181d5Sguenther	 *   %rax, %rdx - scratch
3739f1181d5Sguenther	 *   %rcx - old pmap if not P_SYSTEM
374fbe53cacSkrw	 *   %r12 - new process
3759f1181d5Sguenther	 *   %r13 - new pcb
3769f1181d5Sguenther	 *   %rbx - new pmap if not P_SYSTEM
377f5df1827Smickey	 */
378f5df1827Smickey
3799f1181d5Sguenther	movq	P_ADDR(%r12),%r13
3809f1181d5Sguenther
3819f1181d5Sguenther	/* remember the pmap if not P_SYSTEM */
3829f1181d5Sguenther	xorl	%ebx,%ebx
3839f1181d5Sguenther	testl	$P_SYSTEM,P_FLAG(%r12)
3849f1181d5Sguenther	jnz	1f
3859f1181d5Sguenther	movq	PCB_PMAP(%r13),%rbx
3869f1181d5Sguenther1:
3879f1181d5Sguenther
388fbe53cacSkrw	/* No interrupts while loading new state. */
389fbe53cacSkrw	cli
390fbe53cacSkrw
391f5df1827Smickey	/* Restore stack pointers. */
392f5df1827Smickey	movq	PCB_RSP(%r13),%rsp
393f5df1827Smickey	movq	PCB_RBP(%r13),%rbp
394f5df1827Smickey
395db0a8dc5Smortimer	/* Stack pivot done, setup RETGUARD */
	/* 6*8 = the six callee-saved pushes above the return address */
396db0a8dc5Smortimer	RETGUARD_SETUP_OFF(cpu_switchto, r11, 6*8)
397db0a8dc5Smortimer
39800ac7aa2Sguenther	/* don't switch cr3 to the same thing it already was */
39924056ac0Sguenther	movq	PCB_CR3(%r13),%rax
40024056ac0Sguenther	movq	%cr3,%rdi
40124056ac0Sguenther	xorq	%rax,%rdi
40224056ac0Sguenther	btrq	$63,%rdi	/* ignore CR3_REUSE_PCID */
40324056ac0Sguenther	testq	%rdi,%rdi
40400ac7aa2Sguenther	jz	.Lsame_cr3
40500ac7aa2Sguenther
4069f1181d5Sguenther#ifdef DIAGNOSTIC
40724056ac0Sguenther	/* verify ci_proc_pmap had been updated properly */
40824056ac0Sguenther	cmpq	%rcx,CPUVAR(PROC_PMAP)
40924056ac0Sguenther	jnz	.Lbogus_proc_pmap
4109f1181d5Sguenther#endif
41124056ac0Sguenther	/* record which pmap this CPU should get IPIs for */
41224056ac0Sguenther	movq	%rbx,CPUVAR(PROC_PMAP)
4139f1181d5Sguenther
41424056ac0Sguenther.Lset_cr3:
415b767b017Sguenther	movq	%rax,%cr3			/* %rax used below too */
416fd94711fSguenther
41700ac7aa2Sguenther.Lsame_cr3:
418a1fa3538Sguenther	/*
419a1fa3538Sguenther	 * If we switched from a userland thread with a shallow call stack
420a1fa3538Sguenther	 * (e.g. interrupt->ast->mi_ast->preempt->mi_switch->cpu_switchto)
421a1fa3538Sguenther	 * then the RSB may have attacker controlled entries when we switch
422a1fa3538Sguenther	 * to a deeper call stack in the new thread.  Refill the RSB with
423a1fa3538Sguenther	 * entries safe to speculate into/through.
424a1fa3538Sguenther	 */
425a1fa3538Sguenther	RET_STACK_REFILL_WITH_RCX
426a1fa3538Sguenther
427fbe53cacSkrw	/* Don't bother with the rest if switching to a system process. */
4289f1181d5Sguenther	testq	%rbx,%rbx
4299f1181d5Sguenther	jz	switch_restored
430fd94711fSguenther
431b767b017Sguenther	/* record the bits needed for future U-->K transition */
432b767b017Sguenther	movq	PCB_KSTACK(%r13),%rdx
433b767b017Sguenther	subq	$FRAMESIZE,%rdx
434b767b017Sguenther	movq	%rdx,CPUVAR(KERN_RSP)
435b767b017Sguenther
4361fc8fad1Sguenther	CODEPATCH_START
437b767b017Sguenther	/*
438b767b017Sguenther	 * Meltdown: iff we're doing separate U+K and U-K page tables,
439b767b017Sguenther	 * then record them in cpu_info for easy access in syscall and
4401fc8fad1Sguenther	 * interrupt trampolines.
441b767b017Sguenther	 */
4429f1181d5Sguenther	movq	PM_PDIRPA_INTEL(%rbx),%rdx
443f95e373fSguenther	orq	cr3_reuse_pcid,%rax
444f95e373fSguenther	orq	cr3_pcid_proc_intel,%rdx
445b767b017Sguenther	movq	%rax,CPUVAR(KERN_CR3)
446b767b017Sguenther	movq	%rdx,CPUVAR(USER_CR3)
4471fc8fad1Sguenther	CODEPATCH_END(CPTAG_MELTDOWN_NOP)
448b767b017Sguenther
449fbe53cacSkrwswitch_restored:
450fbe53cacSkrw	SET_CURPCB(%r13)
451fbe53cacSkrw
452f5df1827Smickey	/* Interrupts are okay again. */
453f5df1827Smickey	sti
	/* pop the NEW proc's callee-saved registers (pushed when IT was old) */
454f5df1827Smickey	popq	%r15
455f5df1827Smickey	popq	%r14
456f5df1827Smickey	popq	%r13
457f5df1827Smickey	popq	%r12
458f5df1827Smickey	popq	%rbp
459f5df1827Smickey	popq	%rbx
460db0a8dc5Smortimer	RETGUARD_CHECK(cpu_switchto, r11)
461f5df1827Smickey	ret
4623dd0809fSbluhm	lfence
46324056ac0Sguenther
46424056ac0Sguenther#ifdef DIAGNOSTIC
46524056ac0Sguenther.Lbogus_proc_pmap:
46624056ac0Sguenther	leaq	bogus_proc_pmap,%rdi
4674ce05526Sguenther	call	panic
46824056ac0Sguenther	int3	/* NOTREACHED */
46924056ac0Sguenther	.pushsection .rodata
47024056ac0Sguentherbogus_proc_pmap:
47124056ac0Sguenther	.asciz	"curcpu->ci_proc_pmap didn't point to previous pmap"
47224056ac0Sguenther	.popsection
47324056ac0Sguenther#endif /* DIAGNOSTIC */
474a324dee9SguentherEND(cpu_switchto)
475f5df1827Smickey
/* no-op hook called before entering the idle loop */
47645053f4aSartENTRY(cpu_idle_enter)
47745053f4aSart	ret
4783dd0809fSbluhm	lfence
479f7a56ef1SguentherEND(cpu_idle_enter)
480f5df1827Smickey
/* no-op hook called after leaving the idle loop */
48145053f4aSartENTRY(cpu_idle_leave)
482421775b1Sguenther	ret
4833dd0809fSbluhm	lfence
484f7a56ef1SguentherEND(cpu_idle_leave)
485421775b1Sguenther
486421775b1Sguenther/* placed here for correct static branch prediction in cpu_idle_* */
/* speculation-safe indirect jump to the address in %rax */
487421775b1SguentherNENTRY(retpoline_rax)
488421775b1Sguenther	JMP_RETPOLINE(rax)
489421775b1Sguenther
/*
 * One iteration of the idle loop: tail-call cpu_idle_cycle_fcn if one
 * is installed, otherwise enable interrupts and hlt until the next one.
 */
490421775b1SguentherENTRY(cpu_idle_cycle)
491f7a56ef1Sguenther	RETGUARD_SETUP(cpu_idle_cycle, r11)
4924ce05526Sguenther	movq	cpu_idle_cycle_fcn,%rax
493421775b1Sguenther	cmpq	$0,%rax
	/* tail-jump through the retpoline; the fcn returns to our caller */
494421775b1Sguenther	jne	retpoline_rax
495421775b1Sguenther	sti
496421775b1Sguenther	hlt
497f7a56ef1Sguenther	RETGUARD_CHECK(cpu_idle_cycle, r11)
49845053f4aSart	ret
4993dd0809fSbluhm	lfence
500f7a56ef1SguentherEND(cpu_idle_cycle)
501f5df1827Smickey
502f5df1827Smickey/*
503f5df1827Smickey * savectx(struct pcb *pcb);
504f5df1827Smickey * Update pcb, saving current processor state.
505f5df1827Smickey */
506f5df1827SmickeyENTRY(savectx)
507db0a8dc5Smortimer	RETGUARD_SETUP(savectx, r11)
508f5df1827Smickey	/* Save stack pointers. */
509f5df1827Smickey	movq	%rsp,PCB_RSP(%rdi)
510f5df1827Smickey	movq	%rbp,PCB_RBP(%rdi)
511db0a8dc5Smortimer	RETGUARD_CHECK(savectx, r11)
512f5df1827Smickey	ret
5133dd0809fSbluhm	lfence
514a324dee9SguentherEND(savectx)
515f5df1827Smickey
/* 32-bit syscall entry is unsupported: bounce straight back to userland */
516f5df1827SmickeyIDTVEC(syscall32)
517f5df1827Smickey	sysret		/* go away please */
518a324dee9SguentherEND(Xsyscall32)
519f5df1827Smickey
520f5df1827Smickey/*
521b767b017Sguenther * syscall insn entry.
52274ebaa6aSguenther * Enter here with interrupts blocked; %rcx contains the caller's
52374ebaa6aSguenther * %rip and the original rflags has been copied to %r11.  %cs and
52474ebaa6aSguenther * %ss have been updated to the kernel segments, but %rsp is still
52574ebaa6aSguenther * the user-space value.
526c9de630fSguenther * First order of business is to swap to the kernel GS.base so that
5271fc8fad1Sguenther * we can access our struct cpu_info.  After possibly mucking with
5281fc8fad1Sguenther * pagetables, we switch to our kernel stack.  Once that's in place
529bb386764Sguenther * we can save the rest of the syscall frame and unblock interrupts.
5301fc8fad1Sguenther */
5311fc8fad1SguentherKUTEXT_PAGE_START
5321fc8fad1Sguenther 	.align	NBPG, 0xcc
5331fc8fad1SguentherXUsyscall_meltdown:
5341fc8fad1Sguenther	/*
5351fc8fad1Sguenther	 * This is the real Xsyscall_meltdown page, which is mapped into
5361fc8fad1Sguenther	 * the U-K page tables at the same location as Xsyscall_meltdown
5371fc8fad1Sguenther	 * below.  For this, the Meltdown case, we use the scratch space
5381fc8fad1Sguenther	 * in cpu_info so we can switch to the kernel page tables
5391fc8fad1Sguenther	 * (thank you, Intel), at which point we'll continue at the
540bb386764Sguenther	 * "SYSCALL_ENTRY" after Xsyscall below.
5411fc8fad1Sguenther	 * In case the CPU speculates past the mov to cr3, we put a
5425c3fa5a3Sguenther	 * retpoline-style pause-lfence-jmp-to-pause loop.
54374ebaa6aSguenther	 */
5440e2deb64Sderaadt	endbr64
545f5df1827Smickey	swapgs
5461fc8fad1Sguenther	movq	%rax,CPUVAR(SCRATCH)
5471fc8fad1Sguenther	movq	CPUVAR(KERN_CR3),%rax
5481fc8fad1Sguenther	movq	%rax,%cr3
5491fc8fad1Sguenther0:	pause
550a1fa3538Sguenther	lfence
5511fc8fad1Sguenther	jmp	0b
5521fc8fad1SguentherKUTEXT_PAGE_END
553b767b017Sguenther
5541fc8fad1SguentherKTEXT_PAGE_START
5551fc8fad1Sguenther	.align	NBPG, 0xcc
5560e2deb64SderaadtGENTRY(Xsyscall_meltdown)
5571fc8fad1Sguenther	/* pad to match real Xsyscall_meltdown positioning above */
5581fc8fad1Sguenther	movq	CPUVAR(KERN_CR3),%rax
5591fc8fad1Sguenther	movq	%rax,%cr3
5600e2deb64SderaadtGENTRY(Xsyscall)
5610e2deb64Sderaadt	endbr64
562f5df1827Smickey	swapgs
5631fc8fad1Sguenther	movq	%rax,CPUVAR(SCRATCH)
564bb386764Sguenther	SYSCALL_ENTRY			/* create trapframe */
565f5df1827Smickey	sti
566f5df1827Smickey
	/* dispatch to the C syscall handler with the trapframe as argument */
567f5df1827Smickey	movq	CPUVAR(CURPROC),%r14
568f5df1827Smickey	movq	%rsp,P_MD_REGS(%r14)	# save pointer to frame
569f5df1827Smickey	andl	$~MDP_IRET,P_MD_FLAGS(%r14)
570b5b9857bSart	movq	%rsp,%rdi
5714ce05526Sguenther	call	syscall
572c9ad316fSguenther
573c9ad316fSguenther.Lsyscall_check_asts:
574c9ad316fSguenther	/* Check for ASTs on exit to user mode. */
575f5df1827Smickey	cli
576f5df1827Smickey	CHECK_ASTPENDING(%r11)
577f5df1827Smickey	je	2f
578f5df1827Smickey	CLEAR_ASTPENDING(%r11)
579f5df1827Smickey	sti
580b5b9857bSart	movq	%rsp,%rdi
5814ce05526Sguenther	call	ast
	/* ast() may have posted another AST; re-check with interrupts off */
582c9ad316fSguenther	jmp	.Lsyscall_check_asts
583c9ad316fSguenther
584f5df1827Smickey2:
585f5df1827Smickey#ifdef DIAGNOSTIC
586b5b9857bSart	cmpl	$IPL_NONE,CPUVAR(ILEVEL)
587c9ad316fSguenther	jne	.Lsyscall_spl_not_lowered
5881396572dSguenther#endif /* DIAGNOSTIC */
5891396572dSguenther
590c9ad316fSguenther	/* Could registers have been changed that require an iretq? */
591c9ad316fSguenther	testl	$MDP_IRET, P_MD_FLAGS(%r14)
59231b8ac92Sguenther	jne	intr_user_exit_post_ast
593c9ad316fSguenther
594c9de630fSguenther	/* Restore FPU/"extended CPU state" if it's not already in the CPU */
5950403d5bcSguenther	testl	$CPUPF_USERXSTATE,CPUVAR(PFLAGS)
596c9de630fSguenther	jz	.Lsyscall_restore_xstate
597c9de630fSguenther
598c9de630fSguenther	/* Restore FS.base if it's not already in the CPU */
5990403d5bcSguenther	testl	$CPUPF_USERSEGS,CPUVAR(PFLAGS)
600c9de630fSguenther	jz	.Lsyscall_restore_fsbase
601c9de630fSguenther
602c9de630fSguenther.Lsyscall_restore_registers:
603e9e0c464Sderaadt	call	pku_xonly
604a4858df8Sguenther	RET_STACK_REFILL_WITH_RCX
605a4858df8Sguenther
6061396572dSguenther	movq	TF_R8(%rsp),%r8
6071396572dSguenther	movq	TF_R9(%rsp),%r9
6081396572dSguenther	movq	TF_R10(%rsp),%r10
6091396572dSguenther	movq	TF_R12(%rsp),%r12
6101396572dSguenther	movq	TF_R13(%rsp),%r13
6111396572dSguenther	movq	TF_R14(%rsp),%r14
6121396572dSguenther	movq	TF_R15(%rsp),%r15
613a0dcb178Sguenther
	/* MDS mitigation: verw flushes CPU buffers; codepatched to NOPs if unneeded */
614a0dcb178Sguenther	CODEPATCH_START
615a0dcb178Sguenther	movw	%ds,TF_R8(%rsp)
616a0dcb178Sguenther	verw	TF_R8(%rsp)
617a0dcb178Sguenther	CODEPATCH_END(CPTAG_MDS)
618a0dcb178Sguenther
619a0dcb178Sguenther	movq	TF_RDI(%rsp),%rdi
620a0dcb178Sguenther	movq	TF_RSI(%rsp),%rsi
6211396572dSguenther	movq	TF_RBP(%rsp),%rbp
6221396572dSguenther	movq	TF_RBX(%rsp),%rbx
6231396572dSguenther
624b767b017Sguenther	/*
625b767b017Sguenther	 * We need to finish reading from the trapframe, then switch
626b767b017Sguenther	 * to the user page tables, swapgs, and return.  We need
627b767b017Sguenther	 * to get the final value for the register that was used
628b767b017Sguenther	 * for the mov to %cr3 from somewhere accessible on the
629b767b017Sguenther	 * user page tables, so save it in CPUVAR(SCRATCH) across
630b767b017Sguenther	 * the switch.
631b767b017Sguenther	 */
	/* sysretq restores %rip from %rcx and rflags from %r11 */
6321396572dSguenther	movq	TF_RDX(%rsp),%rdx
6331396572dSguenther	movq	TF_RAX(%rsp),%rax
6341396572dSguenther	movq	TF_RIP(%rsp),%rcx
6351396572dSguenther	movq	TF_RFLAGS(%rsp),%r11
6361396572dSguenther	movq	TF_RSP(%rsp),%rsp
6371fc8fad1Sguenther	CODEPATCH_START
6381fc8fad1Sguenther	movq	%rax,CPUVAR(SCRATCH)
6391fc8fad1Sguenther	movq	CPUVAR(USER_CR3),%rax
640f95e373fSguenther	PCID_SET_REUSE_NOP
641b767b017Sguenther	movq	%rax,%cr3
6421fc8fad1SguentherXsyscall_trampback:
6431fc8fad1Sguenther0:	pause
644a1fa3538Sguenther	lfence
6451fc8fad1Sguenther	jmp	0b
6461fc8fad1Sguenther	CODEPATCH_END(CPTAG_MELTDOWN_NOP)
647b767b017Sguenther	swapgs
648f5df1827Smickey	sysretq
649a324dee9SguentherEND(Xsyscall)
650a324dee9SguentherEND(Xsyscall_meltdown)
6511fc8fad1SguentherKTEXT_PAGE_END
6521fc8fad1Sguenther
	/*
	 * U-K alias of the return trampoline: placed at the same page offset
	 * as Xsyscall_trampback so execution continues here after the cr3
	 * switch above, then restores %rax and returns to userspace.
	 */
6531fc8fad1SguentherKUTEXT_PAGE_START
6541fc8fad1Sguenther	.space	(Xsyscall_trampback - Xsyscall_meltdown) - \
6551fc8fad1Sguenther		(. - XUsyscall_meltdown), 0xcc
6561fc8fad1Sguenther	movq	%rax,%cr3
6571fc8fad1Sguenther	movq	CPUVAR(SCRATCH),%rax
6581fc8fad1Sguenther	swapgs
6591fc8fad1Sguenther	sysretq
6601fc8fad1SguentherKUTEXT_PAGE_END
661f5df1827Smickey
662b767b017Sguenther	.text
663b433e1a0Sguenther	_ALIGN_TRAPS
664c9de630fSguenther	/* in this case, need FS.base but not xstate, rarely happens */
665c9de630fSguenther.Lsyscall_restore_fsbase:	/* CPU doesn't have curproc's FS.base */
6660403d5bcSguenther	orl	$CPUPF_USERSEGS,CPUVAR(PFLAGS)
667c9de630fSguenther	movq	CPUVAR(CURPCB),%rdi
668c9de630fSguenther	jmp	.Lsyscall_restore_fsbase_real
669c9de630fSguenther
670b433e1a0Sguenther	_ALIGN_TRAPS
671c9de630fSguenther.Lsyscall_restore_xstate:	/* CPU doesn't have curproc's xstate */
6720403d5bcSguenther	orl	$(CPUPF_USERXSTATE|CPUPF_USERSEGS),CPUVAR(PFLAGS)
673c9de630fSguenther	movq	CPUVAR(CURPCB),%rdi
	/* xsave_mask split into %edx:%eax for the xrstor-family instruction */
674c9de630fSguenther	movq	xsave_mask(%rip),%rdx
675c9de630fSguenther	movl	%edx,%eax
676c9de630fSguenther	shrq	$32,%rdx
677c9de630fSguenther#if PCB_SAVEFPU != 0
678c9de630fSguenther	addq	$PCB_SAVEFPU,%rdi
679c9de630fSguenther#endif
680c9de630fSguenther	/* untouched state so can't fault */
681c9de630fSguenther	CODEPATCH_START
682ae97d4fcSguenther	fxrstor64	(%rdi)
683*55fdb5faSguenther	CODEPATCH_END(CPTAG_XRSTORS)
684c9de630fSguenther#if PCB_SAVEFPU != 0
685c9de630fSguenther	subq	$PCB_SAVEFPU,%rdi
686c9de630fSguenther#endif
687c9de630fSguenther.Lsyscall_restore_fsbase_real:
	/* load pcb_fsbase into %edx:%eax and write the FS.base MSR */
688c9de630fSguenther	movq	PCB_FSBASE(%rdi),%rdx
689c9de630fSguenther	movl	%edx,%eax
690c9de630fSguenther	shrq	$32,%rdx
691c9de630fSguenther	movl	$MSR_FSBASE,%ecx
692c9de630fSguenther	wrmsr
693c9de630fSguenther	jmp	.Lsyscall_restore_registers
694b767b017Sguenther
695f5df1827Smickey#ifdef DIAGNOSTIC
	/* DIAGNOSTIC: warn that a syscall returned with SPL still raised */
696c9ad316fSguenther.Lsyscall_spl_not_lowered:
697be97ab8cSguenther	leaq	spl_lowered(%rip), %rdi
698c4495499Sguenther	movl	TF_ERR(%rsp),%esi	/* syscall # stashed above */
699f5df1827Smickey	movl	TF_RDI(%rsp),%edx
700f5df1827Smickey	movl	%ebx,%ecx
701b5b9857bSart	movl	CPUVAR(ILEVEL),%r8d
	/* variadic call: %al = 0 says no vector registers are used */
702f5df1827Smickey	xorq	%rax,%rax
7034ce05526Sguenther	call	printf
704f5df1827Smickey#ifdef DDB
705f5df1827Smickey	int	$3
706f5df1827Smickey#endif /* DDB */
	/* force the SPL back down and retry the return-to-user path */
707f5df1827Smickey	movl	$IPL_NONE,CPUVAR(ILEVEL)
708c9ad316fSguenther	jmp	.Lsyscall_check_asts
709f5df1827Smickey
71032d5845fSderaadt	.section .rodata
71132d5845fSderaadtspl_lowered:
71232d5845fSderaadt	.asciz	"WARNING: SPL NOT LOWERED ON SYSCALL %d %d EXIT %x %x\n"
71332d5845fSderaadt	.text
71432d5845fSderaadt#endif
715f5df1827Smickey
/*
 * First code run by a newly-forked proc: call the function in %r12
 * with the argument in %r13 (set up at fork time), then fall into the
 * regular return-to-user AST-check path.
 */
716f5df1827SmickeyNENTRY(proc_trampoline)
717f5df1827Smickey#ifdef MULTIPROCESSOR
7184ce05526Sguenther	call	proc_trampoline_mp
719f5df1827Smickey#endif
720f5df1827Smickey	movl	$IPL_NONE,CPUVAR(ILEVEL)
721f5df1827Smickey	movq	%r13,%rdi
722421775b1Sguenther	movq	%r12,%rax
723421775b1Sguenther	call	retpoline_rax
	/* .Lsyscall_check_asts expects curproc in %r14 */
724c9ad316fSguenther	movq	CPUVAR(CURPROC),%r14
725c9ad316fSguenther	jmp	.Lsyscall_check_asts
726a324dee9SguentherEND(proc_trampoline)
727f5df1827Smickey
728f5df1827Smickey
729f5df1827Smickey/*
73031b8ac92Sguenther * Returning to userspace via iretq.  We do things in this order:
73131b8ac92Sguenther *  - check for ASTs
732c9de630fSguenther *  - restore FPU/"extended CPU state" if it's not already in the CPU
73331b8ac92Sguenther *  - DIAGNOSTIC: no more C calls after this, so check the SPL
73431b8ac92Sguenther *  - restore FS.base if it's not already in the CPU
735c9de630fSguenther *  - restore most registers
73631b8ac92Sguenther *  - update the iret frame from the trapframe
73731b8ac92Sguenther *  - finish reading from the trapframe
73831b8ac92Sguenther *  - switch to the trampoline stack	\
73931b8ac92Sguenther *  - jump to the .kutext segment	|-- Meltdown workaround
74031b8ac92Sguenther *  - switch to the user page tables	/
74131b8ac92Sguenther *  - swapgs
74231b8ac92Sguenther *  - iretq
7431396572dSguenther */
7441fc8fad1SguentherKTEXT_PAGE_START
7451fc8fad1Sguenther        _ALIGN_TRAPS
7461fc8fad1SguentherGENTRY(intr_user_exit)
747b767b017Sguenther#ifdef DIAGNOSTIC
748b767b017Sguenther	pushfq
749b767b017Sguenther	popq	%rdx
750b767b017Sguenther	testq	$PSL_I,%rdx
75131b8ac92Sguenther	jnz	.Lintr_user_exit_not_blocked
752b767b017Sguenther#endif /* DIAGNOSTIC */
75331b8ac92Sguenther
75431b8ac92Sguenther	/* Check for ASTs */
75531b8ac92Sguenther	CHECK_ASTPENDING(%r11)
75631b8ac92Sguenther	je	intr_user_exit_post_ast
75731b8ac92Sguenther	CLEAR_ASTPENDING(%r11)
75831b8ac92Sguenther	sti
75931b8ac92Sguenther	movq	%rsp,%rdi
7604ce05526Sguenther	call	ast
76131b8ac92Sguenther	cli
76231b8ac92Sguenther	jmp	intr_user_exit
76331b8ac92Sguenther
76431b8ac92Sguentherintr_user_exit_post_ast:
765c9de630fSguenther	/* Restore FPU/"extended CPU state" if it's not already in the CPU */
7660403d5bcSguenther	testl	$CPUPF_USERXSTATE,CPUVAR(PFLAGS)
767c9de630fSguenther	jz	.Lintr_restore_xstate
768c9de630fSguenther
76931b8ac92Sguenther#ifdef DIAGNOSTIC
77031b8ac92Sguenther	/* no more C calls after this, so check the SPL */
77131b8ac92Sguenther	cmpl	$0,CPUVAR(ILEVEL)
77231b8ac92Sguenther	jne	.Luser_spl_not_lowered
77331b8ac92Sguenther#endif /* DIAGNOSTIC */
77431b8ac92Sguenther
775c9de630fSguenther	/* Restore FS.base if it's not already in the CPU */
7760403d5bcSguenther	testl	$CPUPF_USERSEGS,CPUVAR(PFLAGS)
777c9de630fSguenther	jz	.Lintr_restore_fsbase
778c9de630fSguenther
779c9de630fSguenther.Lintr_restore_registers:
780e9e0c464Sderaadt	call	pku_xonly
781a4858df8Sguenther	RET_STACK_REFILL_WITH_RCX
782a4858df8Sguenther
7831396572dSguenther	movq	TF_R8(%rsp),%r8
7841396572dSguenther	movq	TF_R9(%rsp),%r9
7851396572dSguenther	movq	TF_R10(%rsp),%r10
7861396572dSguenther	movq	TF_R12(%rsp),%r12
7871396572dSguenther	movq	TF_R13(%rsp),%r13
7881396572dSguenther	movq	TF_R14(%rsp),%r14
7891396572dSguenther	movq	TF_R15(%rsp),%r15
790a0dcb178Sguenther
791a0dcb178Sguenther	CODEPATCH_START
792a0dcb178Sguenther	movw	%ds,TF_R8(%rsp)
793a0dcb178Sguenther	verw	TF_R8(%rsp)
794a0dcb178Sguenther	CODEPATCH_END(CPTAG_MDS)
795a0dcb178Sguenther
796a0dcb178Sguenther	movq	TF_RDI(%rsp),%rdi
797a0dcb178Sguenther	movq	TF_RSI(%rsp),%rsi
7981396572dSguenther	movq	TF_RBP(%rsp),%rbp
7991396572dSguenther	movq	TF_RBX(%rsp),%rbx
8001396572dSguenther
801b767b017Sguenther	/*
802b767b017Sguenther	 * To get the final value for the register that was used
803b767b017Sguenther	 * for the mov to %cr3, we need access to somewhere accessible
804b767b017Sguenther	 * on the user page tables, so we save it in CPUVAR(SCRATCH)
805b767b017Sguenther	 * across the switch.
806b767b017Sguenther	 */
807b767b017Sguenther	/* update iret frame */
808b767b017Sguenther	movq	CPUVAR(INTR_RSP),%rdx
809b767b017Sguenther	movq	$(GSEL(GUCODE_SEL,SEL_UPL)),IRETQ_CS(%rdx)
810b767b017Sguenther	movq	TF_RIP(%rsp),%rax
811b767b017Sguenther	movq	%rax,IRETQ_RIP(%rdx)
812b767b017Sguenther	movq	TF_RFLAGS(%rsp),%rax
813b767b017Sguenther	movq	%rax,IRETQ_RFLAGS(%rdx)
814b767b017Sguenther	movq	TF_RSP(%rsp),%rax
815b767b017Sguenther	movq	%rax,IRETQ_RSP(%rdx)
816b767b017Sguenther	movq	$(GSEL(GUDATA_SEL,SEL_UPL)),IRETQ_SS(%rdx)
817b767b017Sguenther	/* finish with the trap frame */
818b767b017Sguenther	movq	TF_RAX(%rsp),%rax
819b767b017Sguenther	movq	TF_RCX(%rsp),%rcx
820b767b017Sguenther	movq	TF_R11(%rsp),%r11
821b767b017Sguenther	/* switch to the trampoline stack */
822b767b017Sguenther	xchgq	%rdx,%rsp
823b767b017Sguenther	movq	TF_RDX(%rdx),%rdx
8241fc8fad1Sguenther	CODEPATCH_START
8251fc8fad1Sguenther	movq	%rax,CPUVAR(SCRATCH)
826b767b017Sguenther	movq	CPUVAR(USER_CR3),%rax
827f95e373fSguenther	PCID_SET_REUSE_NOP
828b767b017Sguenther	movq	%rax,%cr3
8291fc8fad1SguentherXiretq_trampback:
8301a7819b7SguentherKTEXT_PAGE_END
8311a7819b7Sguenther/* the movq %cr3 switches to this "KUTEXT" page */
8321a7819b7SguentherKUTEXT_PAGE_START
8331a7819b7Sguenther	.space	(Xiretq_trampback - Xsyscall_meltdown) - \
8341a7819b7Sguenther		(. - XUsyscall_meltdown), 0xcc
8351a7819b7Sguenther	movq	CPUVAR(SCRATCH),%rax
8361a7819b7Sguenther.Liretq_swapgs:
8371a7819b7Sguenther	swapgs
8381a7819b7Sguentherdoreti_iret_meltdown:
8391a7819b7Sguenther	iretq
8401a7819b7SguentherKUTEXT_PAGE_END
8411a7819b7Sguenther/*
8421a7819b7Sguenther * Back to the "KTEXT" page to fill in the speculation trap and the
8431a7819b7Sguenther * swapgs+iretq used for non-Meltdown kernels.  This switching back
8441a7819b7Sguenther * and forth between segments is so that we can do the .space
8451a7819b7Sguenther * calculation below to guarantee the iretq's above and below line
8461a7819b7Sguenther * up, so the 'doreti_iret' label lines up with the iretq whether
8471a7819b7Sguenther * the CPU is affected by Meltdown or not.
8481a7819b7Sguenther */
8491a7819b7SguentherKTEXT_PAGE_START
8501fc8fad1Sguenther0:	pause
851a1fa3538Sguenther	lfence
8521fc8fad1Sguenther	jmp	0b
8531a7819b7Sguenther	.space	(.Liretq_swapgs - XUsyscall_meltdown) - \
8541a7819b7Sguenther		(. - Xsyscall_meltdown), 0xcc
8551fc8fad1Sguenther	CODEPATCH_END(CPTAG_MELTDOWN_NOP)
856b767b017Sguenther	swapgs
857b767b017Sguenther
8584ce05526Sguenther	.globl	doreti_iret
8594ce05526Sguentherdoreti_iret:
860b767b017Sguenther	iretq
8611fc8fad1SguentherKTEXT_PAGE_END
8621fc8fad1Sguenther
86331b8ac92Sguenther	.text
864b433e1a0Sguenther	_ALIGN_TRAPS
865c9de630fSguenther.Lintr_restore_xstate:		/* CPU doesn't have curproc's xstate */
8660403d5bcSguenther	orl	$CPUPF_USERXSTATE,CPUVAR(PFLAGS)
867c9de630fSguenther	movq	CPUVAR(CURPCB),%rdi
868c9de630fSguenther#if PCB_SAVEFPU != 0
869c9de630fSguenther	addq	$PCB_SAVEFPU,%rdi
870c9de630fSguenther#endif
871*55fdb5faSguenther	movq	xsave_mask(%rip),%rdx
872*55fdb5faSguenther	movl	%edx,%eax
873*55fdb5faSguenther	shrq	$32, %rdx
874*55fdb5faSguenther	CODEPATCH_START
875*55fdb5faSguenther	fxrstor64	(%rdi)
876*55fdb5faSguenther	CODEPATCH_END(CPTAG_XRSTORS)
877*55fdb5faSguenther	//testl	%eax,%eax
878*55fdb5faSguenther	//jnz	.Lintr_xrstor_faulted
879c9de630fSguenther.Lintr_restore_fsbase:		/* CPU doesn't have curproc's FS.base */
8800403d5bcSguenther	orl	$CPUPF_USERSEGS,CPUVAR(PFLAGS)
881c9de630fSguenther	movq	CPUVAR(CURPCB),%rdx
882c9de630fSguenther	movq	PCB_FSBASE(%rdx),%rdx
883c9de630fSguenther	movl	%edx,%eax
884c9de630fSguenther	shrq	$32,%rdx
885c9de630fSguenther	movl	$MSR_FSBASE,%ecx
886c9de630fSguenther	wrmsr
887c9de630fSguenther	jmp	.Lintr_restore_registers
888c9de630fSguenther
889c9de630fSguenther.Lintr_xrstor_faulted:
890c9de630fSguenther	/*
891c9de630fSguenther	 * xrstor faulted; we need to reset the FPU state and call trap()
892c9de630fSguenther	 * to post a signal, which requires interrupts be enabled.
893c9de630fSguenther	 */
894c9de630fSguenther	sti
895c9de630fSguenther	movq	proc0paddr(%rip),%rdi
896c9de630fSguenther#if PCB_SAVEFPU != 0
897c9de630fSguenther	addq	$PCB_SAVEFPU,%rdi
898c9de630fSguenther#endif
899c9de630fSguenther	CODEPATCH_START
900ae97d4fcSguenther	fxrstor64	(%rdi)
901*55fdb5faSguenther	CODEPATCH_END(CPTAG_XRSTORS)
902c9de630fSguenther	movq	$T_PROTFLT,TF_TRAPNO(%rsp)
903c9de630fSguenther	jmp	recall_trap
904c9de630fSguenther
905c9de630fSguenther#ifdef DIAGNOSTIC
90631b8ac92Sguenther.Lintr_user_exit_not_blocked:
90731b8ac92Sguenther	movl	warn_once(%rip),%edi
90831b8ac92Sguenther	testl	%edi,%edi
90931b8ac92Sguenther	jnz	1f
91031b8ac92Sguenther	incl	%edi
91131b8ac92Sguenther	movl	%edi,warn_once(%rip)
91231b8ac92Sguenther	leaq	.Lnot_blocked(%rip),%rdi
9134ce05526Sguenther	call	printf
91431b8ac92Sguenther#ifdef DDB
91531b8ac92Sguenther	int	$3
91631b8ac92Sguenther#endif /* DDB */
91731b8ac92Sguenther1:	cli
91831b8ac92Sguenther	jmp	intr_user_exit
91931b8ac92Sguenther
92031b8ac92Sguenther.Luser_spl_not_lowered:
92131b8ac92Sguenther	sti
92231b8ac92Sguenther	leaq	intr_spl_lowered(%rip),%rdi
92331b8ac92Sguenther	movl	CPUVAR(ILEVEL),%esi
92431b8ac92Sguenther	xorl	%edx,%edx		/* always SPL zero for userspace */
92531b8ac92Sguenther	xorl	%eax,%eax
9264ce05526Sguenther	call	printf
92731b8ac92Sguenther#ifdef DDB
92831b8ac92Sguenther	int	$3
92931b8ac92Sguenther#endif /* DDB */
93031b8ac92Sguenther	movl	$0,CPUVAR(ILEVEL)
93131b8ac92Sguenther	cli
93231b8ac92Sguenther	jmp	intr_user_exit
93331b8ac92Sguenther
93431b8ac92Sguenther	.section .rodata
93531b8ac92Sguentherintr_spl_lowered:
93631b8ac92Sguenther	.asciz	"WARNING: SPL NOT LOWERED ON TRAP EXIT %x %x\n"
93731b8ac92Sguenther	.text
93831b8ac92Sguenther#endif /* DIAGNOSTIC */
939a324dee9SguentherEND(Xintr_user_exit)
94031b8ac92Sguenther
94131b8ac92Sguenther
94231b8ac92Sguenther/*
94331b8ac92Sguenther * Return to supervisor mode from trap or interrupt
94431b8ac92Sguenther */
94531b8ac92SguentherNENTRY(intr_fast_exit)
94631b8ac92Sguenther#ifdef DIAGNOSTIC
94731b8ac92Sguenther	pushfq
94831b8ac92Sguenther	popq	%rdx
94931b8ac92Sguenther	testq	$PSL_I,%rdx
95031b8ac92Sguenther	jnz	.Lintr_exit_not_blocked
95131b8ac92Sguenther#endif /* DIAGNOSTIC */
95231b8ac92Sguenther	movq	TF_RDI(%rsp),%rdi
95331b8ac92Sguenther	movq	TF_RSI(%rsp),%rsi
95431b8ac92Sguenther	movq	TF_R8(%rsp),%r8
95531b8ac92Sguenther	movq	TF_R9(%rsp),%r9
95631b8ac92Sguenther	movq	TF_R10(%rsp),%r10
95731b8ac92Sguenther	movq	TF_R12(%rsp),%r12
95831b8ac92Sguenther	movq	TF_R13(%rsp),%r13
95931b8ac92Sguenther	movq	TF_R14(%rsp),%r14
96031b8ac92Sguenther	movq	TF_R15(%rsp),%r15
96131b8ac92Sguenther	movq	TF_RBP(%rsp),%rbp
96231b8ac92Sguenther	movq	TF_RBX(%rsp),%rbx
963b767b017Sguenther	movq	TF_RDX(%rsp),%rdx
9641396572dSguenther	movq	TF_RCX(%rsp),%rcx
9651396572dSguenther	movq	TF_R11(%rsp),%r11
9661396572dSguenther	movq	TF_RAX(%rsp),%rax
9671396572dSguenther	addq	$TF_RIP,%rsp
968c6853312Sguenther	iretq
969c6853312Sguenther
970b767b017Sguenther#ifdef DIAGNOSTIC
971b767b017Sguenther.Lintr_exit_not_blocked:
972b767b017Sguenther	movl	warn_once(%rip),%edi
973b767b017Sguenther	testl	%edi,%edi
974b767b017Sguenther	jnz	1f
975b767b017Sguenther	incl	%edi
976b767b017Sguenther	movl	%edi,warn_once(%rip)
977b767b017Sguenther	leaq	.Lnot_blocked(%rip),%rdi
9784ce05526Sguenther	call	printf
979b767b017Sguenther#ifdef DDB
980b767b017Sguenther	int	$3
981b767b017Sguenther#endif /* DDB */
982b767b017Sguenther1:	cli
983b767b017Sguenther	jmp	intr_fast_exit
984b767b017Sguenther
985b767b017Sguenther	.data
986b767b017Sguenther.global warn_once
987b767b017Sguentherwarn_once:
988b767b017Sguenther	.long	0
989b767b017Sguenther	.section .rodata
990b767b017Sguenther.Lnot_blocked:
991b767b017Sguenther	.asciz	"WARNING: INTERRUPTS NOT BLOCKED ON INTERRUPT RETURN: 0x%x 0x%x\n"
992b767b017Sguenther	.text
993b767b017Sguenther#endif
994a324dee9SguentherEND(intr_fast_exit)
9956950c8e2Smpi
996c9de630fSguenther/*
997c9de630fSguenther * FPU/"extended CPU state" handling
998*55fdb5faSguenther *	void xrstor_kern(sfp, mask)
999*55fdb5faSguenther *		using first of xrstors/xrstor/fxrstor, load given state
1000*55fdb5faSguenther *		which is assumed to be trusted: i.e., unaltered from
1001*55fdb5faSguenther *		xsaves/xsaveopt/xsave/fxsave by kernel
1002c9de630fSguenther * 	int xrstor_user(sfp, mask)
1003*55fdb5faSguenther *		using first of xrstor/fxrstor, load given state which might
1004*55fdb5faSguenther *		not be trustable: #GP faults will be caught; returns 0/1 if
1005*55fdb5faSguenther *		okay/it trapped.
1006c9de630fSguenther *	void fpusave(sfp)
1007c9de630fSguenther *		save current state, but retain it in the FPU
1008c9de630fSguenther *	void fpusavereset(sfp)
1009c9de630fSguenther *		save current state and reset FPU to initial/kernel state
1010c4fce443Sguenther *	int xsetbv_user(reg, mask)
10114039a24bSjsg *		load specified %xcr# register, returns 0/1 if okay/it trapped
1012c9de630fSguenther */
1013c9de630fSguenther
1014*55fdb5faSguentherENTRY(xrstor_kern)
1015*55fdb5faSguenther	RETGUARD_SETUP(xrstor_kern, r11)
1016*55fdb5faSguenther	movq	%rsi, %rdx
1017*55fdb5faSguenther	movl	%esi, %eax
1018*55fdb5faSguenther	shrq	$32, %rdx
1019*55fdb5faSguenther	CODEPATCH_START
1020*55fdb5faSguenther	fxrstor64	(%rdi)
1021*55fdb5faSguenther	CODEPATCH_END(CPTAG_XRSTORS)
1022*55fdb5faSguenther	RETGUARD_CHECK(xrstor_kern, r11)
1023*55fdb5faSguenther	ret
1024*55fdb5faSguenther	lfence
1025*55fdb5faSguentherEND(xrstor_kern)
1026*55fdb5faSguenther
1027b1cdcaf5SguentherENTRY(xrstor_user)
1028db0a8dc5Smortimer	RETGUARD_SETUP(xrstor_user, r11)
1029b1cdcaf5Sguenther	movq	%rsi, %rdx
1030b1cdcaf5Sguenther	movl	%esi, %eax
1031b1cdcaf5Sguenther	shrq	$32, %rdx
1032b1cdcaf5Sguenther	.globl	xrstor_fault
1033b1cdcaf5Sguentherxrstor_fault:
1034c9de630fSguenther	CODEPATCH_START
1035ae97d4fcSguenther	fxrstor64	(%rdi)
1036c9de630fSguenther	CODEPATCH_END(CPTAG_XRSTOR)
1037198d2c0bSguenther	xorl	%eax, %eax
1038db0a8dc5Smortimer	RETGUARD_CHECK(xrstor_user, r11)
1039198d2c0bSguenther	ret
10403dd0809fSbluhm	lfence
1041be97ab8cSguentherNENTRY(xrstor_resume)
1042198d2c0bSguenther	movl	$1, %eax
1043db0a8dc5Smortimer	RETGUARD_CHECK(xrstor_user, r11)
1044b1cdcaf5Sguenther	ret
10453dd0809fSbluhm	lfence
1046c9de630fSguentherEND(xrstor_user)
1047c9de630fSguenther
1048c9de630fSguentherENTRY(fpusave)
1049db0a8dc5Smortimer	RETGUARD_SETUP(fpusave, r11)
1050c9de630fSguenther	movq	xsave_mask(%rip),%rdx
1051c9de630fSguenther	movl	%edx,%eax
1052c9de630fSguenther	shrq	$32,%rdx
1053c9de630fSguenther	CODEPATCH_START
1054ae97d4fcSguenther	fxsave64	(%rdi)
1055c9de630fSguenther	CODEPATCH_END(CPTAG_XSAVE)
1056db0a8dc5Smortimer	RETGUARD_CHECK(fpusave, r11)
1057c9de630fSguenther	ret
10583dd0809fSbluhm	lfence
1059c9de630fSguentherEND(fpusave)
1060c9de630fSguenther
1061c9de630fSguentherENTRY(fpusavereset)
1062db0a8dc5Smortimer	RETGUARD_SETUP(fpusavereset, r11)
1063c9de630fSguenther	movq	xsave_mask(%rip),%rdx
1064c9de630fSguenther	movl	%edx,%eax
1065c9de630fSguenther	shrq	$32,%rdx
1066c9de630fSguenther	CODEPATCH_START
1067ae97d4fcSguenther	fxsave64	(%rdi)
1068c9de630fSguenther	CODEPATCH_END(CPTAG_XSAVE)
1069c9de630fSguenther	movq	proc0paddr(%rip),%rdi
1070c9de630fSguenther#if PCB_SAVEFPU != 0
1071c9de630fSguenther	addq	$PCB_SAVEFPU,%rdi
1072c9de630fSguenther#endif
1073c9de630fSguenther	CODEPATCH_START
1074ae97d4fcSguenther	fxrstor64	(%rdi)
1075*55fdb5faSguenther	CODEPATCH_END(CPTAG_XRSTORS)
1076db0a8dc5Smortimer	RETGUARD_CHECK(fpusavereset, r11)
1077c9de630fSguenther	ret
10783dd0809fSbluhm	lfence
1079c9de630fSguentherEND(fpusavereset)
1080c9de630fSguenther
1081c4fce443SguentherENTRY(xsetbv_user)
1082c4fce443Sguenther	RETGUARD_SETUP(xsetbv_user, r11)
1083c4fce443Sguenther	movl	%edi, %ecx
1084c4fce443Sguenther	movq	%rsi, %rdx
1085c4fce443Sguenther	movl	%esi, %eax
1086c4fce443Sguenther	shrq	$32, %rdx
1087c4fce443Sguenther	.globl	xsetbv_fault
1088c4fce443Sguentherxsetbv_fault:
1089c4fce443Sguenther	xsetbv
1090c4fce443Sguenther	xorl	%eax, %eax
1091c4fce443Sguenther	RETGUARD_CHECK(xsetbv_user, r11)
1092c4fce443Sguenther	ret
10933dd0809fSbluhm	lfence
1094c4fce443SguentherNENTRY(xsetbv_resume)
1095c4fce443Sguenther	movl	$1, %eax
1096c4fce443Sguenther	RETGUARD_CHECK(xsetbv_user, r11)
1097c4fce443Sguenther	ret
10983dd0809fSbluhm	lfence
1099c4fce443SguentherEND(xsetbv_user)
1100c4fce443Sguenther
1101c9de630fSguenther	.section .rodata
11024ce05526Sguenther	.globl	_xrstor
11034ce05526Sguenther_xrstor:
1104ae97d4fcSguenther	xrstor64	(%rdi)
1105c9de630fSguenther
1106*55fdb5faSguenther	.globl	_xrstors
1107*55fdb5faSguenther_xrstors:
1108*55fdb5faSguenther	xrstors64	(%rdi)
1109*55fdb5faSguenther
11104ce05526Sguenther	.globl	_xsave
11114ce05526Sguenther_xsave:
1112ae97d4fcSguenther	xsave64		(%rdi)
1113c9de630fSguenther
1114*55fdb5faSguenther	.globl	_xsaves
1115*55fdb5faSguenther_xsaves:
1116*55fdb5faSguenther	xsaves64	(%rdi)
1117*55fdb5faSguenther
11184ce05526Sguenther	.globl	_xsaveopt
11194ce05526Sguenther_xsaveopt:
1120ae97d4fcSguenther	xsaveopt64	(%rdi)
1121b1cdcaf5Sguenther
11224ce05526Sguenther	.globl	_pcid_set_reuse
11234ce05526Sguenther_pcid_set_reuse:
1124f95e373fSguenther	orl	$(CR3_REUSE_PCID >> 32),CPUVAR(USER_CR3 + 4)
1125f95e373fSguenther
11263a36161cSartENTRY(pagezero)
1127db0a8dc5Smortimer	RETGUARD_SETUP(pagezero, r11)
11283a36161cSart	movq    $-PAGE_SIZE,%rdx
11293a36161cSart	subq    %rdx,%rdi
11303a36161cSart	xorq    %rax,%rax
11313a36161cSart1:
11323a36161cSart	movnti  %rax,(%rdi,%rdx)
11333a36161cSart	movnti  %rax,8(%rdi,%rdx)
11343a36161cSart	movnti  %rax,16(%rdi,%rdx)
11353a36161cSart	movnti  %rax,24(%rdi,%rdx)
11363a36161cSart	addq    $32,%rdx
11373a36161cSart	jne     1b
11383a36161cSart	sfence
1139db0a8dc5Smortimer	RETGUARD_CHECK(pagezero, r11)
11403a36161cSart	ret
11413dd0809fSbluhm	lfence
1142a324dee9SguentherEND(pagezero)
11433c8478a6Sgwk
1144e9e0c464Sderaadt/* void pku_xonly(void) */
1145e9e0c464SderaadtENTRY(pku_xonly)
1146e9e0c464Sderaadt	movq	pg_xo,%rax	/* have PKU support? */
1147e9e0c464Sderaadt	cmpq	$0,%rax
1148e9e0c464Sderaadt	je	1f
1149e9e0c464Sderaadt	movl	$0,%ecx		/* force PKRU for xonly restriction */
1150e9e0c464Sderaadt	movl	$0,%edx
1151e9e0c464Sderaadt	movl	$PGK_VALUE,%eax	/* key0 normal, key1 is exec without read */
1152e9e0c464Sderaadt	wrpkru
1153e9e0c464Sderaadt1:	ret
1154e9e0c464Sderaadt	lfence
1155e9e0c464SderaadtEND(pku_xonly)
1156e9e0c464Sderaadt
11576f4c4614Smlarkin/* int rdmsr_safe(u_int msr, uint64_t *data) */
11586f4c4614SmlarkinENTRY(rdmsr_safe)
11593fc877c6Smortimer	RETGUARD_SETUP(rdmsr_safe, r10)
11606f4c4614Smlarkin
11616f4c4614Smlarkin	movl	%edi,	%ecx	/* u_int msr */
11626f4c4614Smlarkin	.globl	rdmsr_safe_fault
11636f4c4614Smlarkinrdmsr_safe_fault:
11646f4c4614Smlarkin	rdmsr
11656f4c4614Smlarkin	salq	$32, %rdx
11666f4c4614Smlarkin	movl	%eax, %eax
11676f4c4614Smlarkin	orq	%rdx, %rax
11686f4c4614Smlarkin	movq	%rax, (%rsi)	/* *data */
11696f4c4614Smlarkin	xorq	%rax, %rax
11706f4c4614Smlarkin
11713fc877c6Smortimer	RETGUARD_CHECK(rdmsr_safe, r10)
11726f4c4614Smlarkin	ret
11733dd0809fSbluhm	lfence
11746f4c4614Smlarkin
11756f4c4614SmlarkinNENTRY(rdmsr_resume)
11766f4c4614Smlarkin	movl	$0x1, %eax
11773fc877c6Smortimer	RETGUARD_CHECK(rdmsr_safe, r10)
11786f4c4614Smlarkin	ret
11793dd0809fSbluhm	lfence
1180a324dee9SguentherEND(rdmsr_safe)
11816f4c4614Smlarkin
1182bc3c2f61Santon#if NHYPERV > 0
1183bc3c2f61Santon/* uint64_t hv_hypercall_trampoline(uint64_t control, paddr_t input, paddr_t output) */
1184bc3c2f61SantonNENTRY(hv_hypercall_trampoline)
1185bc3c2f61Santon	endbr64
1186bc3c2f61Santon	mov	%rdx, %r8
1187bc3c2f61Santon	mov	%rsi, %rdx
1188bc3c2f61Santon	mov	%rdi, %rcx
1189bc3c2f61Santon	jmp	hv_hypercall_page
1190bc3c2f61SantonEND(hv_hypercall_trampoline)
1191bc3c2f61Santon	/* Hypercall page needs to be page aligned */
1192bc3c2f61Santon	.text
1193bc3c2f61Santon	.align	NBPG, 0xcc
1194bc3c2f61Santon	.globl	hv_hypercall_page
1195bc3c2f61Santonhv_hypercall_page:
1196bc3c2f61Santon	.skip	0x1000, 0xcc
1197bc3c2f61Santon#endif /* NHYPERV > 0 */
1198bc3c2f61Santon
1199d8213a49Smikeb#if NXEN > 0
1200d8213a49Smikeb	/* Hypercall page needs to be page aligned */
1201d8213a49Smikeb	.text
12020175496dSderaadt	.align	NBPG, 0xcc
12034ce05526Sguenther	.globl	xen_hypercall_page
12044ce05526Sguentherxen_hypercall_page:
12050175496dSderaadt	.skip	0x1000, 0xcc
1206d8213a49Smikeb#endif /* NXEN > 0 */
1207