xref: /freebsd/sys/arm64/arm64/exception.S (revision 783d3ff6)
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <machine/asm.h>
#include <machine/armreg.h>
#include "assym.inc"

	.text

/*
 * This is limited to 28 instructions as it's placed in the exception vector
 * slot that is 32 instructions long. We need one for the branch, and three
 * for the prologue.
 */
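/*
 * (For reference: each slot in the vector table below is aligned on a
 * 2^7 = 128 byte boundary, i.e. 32 4-byte A64 instructions; the branch and
 * the trailing dsb/isb/brk guard emitted by the "vector" macro account for
 * the other four instructions.)
 */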
.macro	save_registers_head el
.if \el == 1
	mov	x18, sp
	stp	x0,  x1,  [sp, #(TF_X - TF_SIZE - 128)]!
.else
	stp	x0,  x1,  [sp, #(TF_X - TF_SIZE)]!
.endif
	stp	x2,  x3,  [sp, #(2  * 8)]
	stp	x4,  x5,  [sp, #(4  * 8)]
	stp	x6,  x7,  [sp, #(6  * 8)]
	stp	x8,  x9,  [sp, #(8  * 8)]
	stp	x10, x11, [sp, #(10 * 8)]
	stp	x12, x13, [sp, #(12 * 8)]
	stp	x14, x15, [sp, #(14 * 8)]
	stp	x16, x17, [sp, #(16 * 8)]
	stp	x18, x19, [sp, #(18 * 8)]
	stp	x20, x21, [sp, #(20 * 8)]
	stp	x22, x23, [sp, #(22 * 8)]
	stp	x24, x25, [sp, #(24 * 8)]
	stp	x26, x27, [sp, #(26 * 8)]
	stp	x28, x29, [sp, #(28 * 8)]
.if \el == 0
	mrs	x18, sp_el0
.endif
	mrs	x10, elr_el1
	mrs	x11, spsr_el1
	mrs	x12, esr_el1
	mrs	x13, far_el1
	stp	x18,  lr, [sp, #(TF_SP - TF_X)]!
	stp	x10, x11, [sp, #(TF_ELR)]
	stp	x12, x13, [sp, #(TF_ESR)]
	mrs	x18, tpidr_el1
.endm
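/*
 * The stores above match the TF_* offsets generated into assym.inc from the
 * machine-dependent trapframe; roughly (a sketch only, see machine/frame.h
 * for the authoritative definition):
 *
 *	struct trapframe {
 *		uint64_t tf_sp;		// x18 above: sp (EL1) or sp_el0 (EL0)
 *		uint64_t tf_lr;
 *		uint64_t tf_elr;
 *		uint64_t tf_spsr;
 *		uint64_t tf_esr;
 *		uint64_t tf_far;
 *		uint64_t tf_x[30];	// x0-x29
 *	};
 *
 * On exit sp points at the saved tf_sp slot (the start of the frame here)
 * and x18 holds the per-CPU pointer reloaded from tpidr_el1.
 */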

.macro	save_registers el
	add	x29, sp, #(TF_SIZE)
.if \el == 0
#if defined(PERTHREAD_SSP)
	/* Load the SSP canary to sp_el0 */
	ldr	x1, [x18, #(PC_CURTHREAD)]
	add	x1, x1, #(TD_MD_CANARY)
	msr	sp_el0, x1
#endif

	/* Apply the SSBD (CVE-2018-3639) workaround if needed */
	ldr	x1, [x18, #PC_SSBD]
	cbz	x1, 1f
	mov	w0, #1
	blr	x1
1:

	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_exit_el0

	ldr	x0, [x18, #(PC_CURTHREAD)]
	bl	dbg_monitor_enter

	/* Unmask debug and SError exceptions */
	msr	daifclr, #(DAIF_D | DAIF_A)
.else
	/*
	 * Unmask SError exceptions. For EL1, debug exceptions are
	 * conditionally unmasked later, in do_el1h_sync().
	 */
	msr	daifclr, #(DAIF_A)
.endif
.endm
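/*
 * Note: PC_SSBD is a per-CPU function pointer; when it is non-NULL the
 * macros above and below call it with w0 = 1 on entry from EL0 to apply
 * the SSBD (CVE-2018-3639) mitigation and with w0 = 0 before returning to
 * EL0 to remove it again.
 */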

.macro	restore_registers el
	/*
	 * Mask all exceptions; x18 may change in the interrupt exception
	 * handler.
	 */
	msr	daifset, #(DAIF_ALL)
.if \el == 0
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	bl	dbg_monitor_exit

	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_enter_el0

	/* Remove the SSBD (CVE-2018-3639) workaround if needed */
	ldr	x1, [x18, #PC_SSBD]
	cbz	x1, 1f
	mov	w0, #0
	blr	x1
1:
.endif
	ldp	x18,  lr, [sp, #(TF_SP)]
	ldp	x10, x11, [sp, #(TF_ELR)]
.if \el == 0
	msr	sp_el0, x18
.endif
	msr	spsr_el1, x11
	msr	elr_el1, x10
	ldp	x0,  x1,  [sp, #(TF_X + 0  * 8)]
	ldp	x2,  x3,  [sp, #(TF_X + 2  * 8)]
	ldp	x4,  x5,  [sp, #(TF_X + 4  * 8)]
	ldp	x6,  x7,  [sp, #(TF_X + 6  * 8)]
	ldp	x8,  x9,  [sp, #(TF_X + 8  * 8)]
	ldp	x10, x11, [sp, #(TF_X + 10 * 8)]
	ldp	x12, x13, [sp, #(TF_X + 12 * 8)]
	ldp	x14, x15, [sp, #(TF_X + 14 * 8)]
	ldp	x16, x17, [sp, #(TF_X + 16 * 8)]
.if \el == 0
	/*
	 * We only restore the callee saved registers when returning to
	 * userland as they may have been updated by a system call or signal.
	 */
	ldp	x18, x19, [sp, #(TF_X + 18 * 8)]
	ldp	x20, x21, [sp, #(TF_X + 20 * 8)]
	ldp	x22, x23, [sp, #(TF_X + 22 * 8)]
	ldp	x24, x25, [sp, #(TF_X + 24 * 8)]
	ldp	x26, x27, [sp, #(TF_X + 26 * 8)]
	ldp	x28, x29, [sp, #(TF_X + 28 * 8)]
.else
	ldr	x29, [sp, #(TF_X + 29 * 8)]
.endif
.if \el == 0
	add	sp, sp, #(TF_SIZE)
.else
	mov	sp, x18
	mrs	x18, tpidr_el1
.endif
.endm
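/*
 * x18 is reserved for the per-CPU pointer in the kernel, which is why it is
 * only restored from the trapframe when returning to EL0; on the EL1 path
 * it carries the saved stack pointer and is then reloaded from tpidr_el1.
 */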

.macro	do_ast
	mrs	x19, daif
	/* Make sure the IRQs are enabled before calling ast() */
	bic	x19, x19, #PSR_I
1:
	/*
	 * Mask interrupts while checking the ast pending flag
	 */
	msr	daifset, #(DAIF_INTR)

	/* Read the current thread AST mask */
	ldr	x1, [x18, #PC_CURTHREAD]	/* Load curthread */
	ldr	w1, [x1, #(TD_AST)]

	/* Check if we have a non-zero AST mask */
	cbz	w1, 2f

	/* Restore interrupts */
	msr	daif, x19

	/* handle the ast */
	mov	x0, sp
	bl	_C_LABEL(ast)

	/* Re-check for new ast scheduled */
	b	1b
2:
.endm
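/*
 * Roughly the following, as a sketch (the helper names are illustrative
 * only; TD_AST is the offset of the per-thread AST mask):
 *
 *	daif = read_daif() & ~PSR_I;	// IRQs become enabled once restored
 *	for (;;) {
 *		mask_interrupts();	// daifset DAIF_INTR
 *		if (curthread->td_ast == 0)
 *			break;		// leave with interrupts masked
 *		write_daif(daif);	// re-enable interrupts
 *		ast(framep);		// may set td_ast again
 *	}
 */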

#ifdef KMSAN
/*
 * The KMSAN runtime relies on a TLS block to track initialization and origin
 * state for function parameters and return values.  To keep this state
 * consistent in the face of asynchronous kernel-mode traps, the runtime
 * maintains a stack of blocks: when handling an exception or interrupt,
 * kmsan_intr_enter() pushes the new block to be used until the handler is
 * complete, at which point kmsan_intr_leave() restores the previous block.
 *
 * Thus, KMSAN_ENTER/LEAVE hooks are required only in handlers for events that
 * may have happened while in kernel-mode.  In particular, they are not
 * required around system call handling or ast() calls.  Otherwise,
 * kmsan_intr_enter() can be called unconditionally, without distinguishing
 * between entry from user-mode or kernel-mode.
 */
#define	KMSAN_ENTER	bl kmsan_intr_enter
#define	KMSAN_LEAVE	bl kmsan_intr_leave
#else
#define	KMSAN_ENTER
#define	KMSAN_LEAVE
#endif

ENTRY(handle_el1h_sync)
	save_registers 1
	KMSAN_ENTER
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	bl	do_el1h_sync
	KMSAN_LEAVE
	restore_registers 1
	ERET
END(handle_el1h_sync)

ENTRY(handle_el1h_irq)
	save_registers 1
	KMSAN_ENTER
	mov	x0, sp
	bl	intr_irq_handler
	KMSAN_LEAVE
	restore_registers 1
	ERET
END(handle_el1h_irq)

ENTRY(handle_el0_sync)
	save_registers 0
	KMSAN_ENTER
	ldr	x0, [x18, #PC_CURTHREAD]
	mov	x1, sp
	str	x1, [x0, #TD_FRAME]
	bl	do_el0_sync
	do_ast
	KMSAN_LEAVE
	restore_registers 0
	ERET
END(handle_el0_sync)

ENTRY(handle_el0_irq)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
	bl	intr_irq_handler
	do_ast
	KMSAN_LEAVE
	restore_registers 0
	ERET
END(handle_el0_irq)

ENTRY(handle_serror)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
1:	bl	do_serror
	b	1b
	KMSAN_LEAVE
END(handle_serror)

ENTRY(handle_empty_exception)
	save_registers 0
	KMSAN_ENTER
	mov	x0, sp
1:	bl	unhandled_exception
	b	1b
	KMSAN_LEAVE
END(handle_empty_exception)

.macro	vector	name, el
	.align 7
	save_registers_head \el
	b	handle_\name
	dsb	sy
	isb
	/* Break instruction to ensure we aren't executing code here. */
	brk	0x42
.endm

.macro	vempty el
	vector	empty_exception \el
.endm

	.align 11
	.globl exception_vectors
exception_vectors:
	vempty 1		/* Synchronous EL1t */
	vempty 1		/* IRQ EL1t */
	vempty 1		/* FIQ EL1t */
	vempty 1		/* Error EL1t */

	vector el1h_sync 1	/* Synchronous EL1h */
	vector el1h_irq 1	/* IRQ EL1h */
	vempty 1		/* FIQ EL1h */
	vector serror 1		/* Error EL1h */

	vector el0_sync 0	/* Synchronous 64-bit EL0 */
	vector el0_irq 0	/* IRQ 64-bit EL0 */
	vempty 0		/* FIQ 64-bit EL0 */
	vector serror 0		/* Error 64-bit EL0 */

	vector el0_sync 0	/* Synchronous 32-bit EL0 */
	vector el0_irq 0	/* IRQ 32-bit EL0 */
	vempty 0		/* FIQ 32-bit EL0 */
	vector serror 0		/* Error 32-bit EL0 */
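
/*
 * The table above follows the VBAR_EL1 layout: four groups of four 128-byte
 * entries (current EL with SP_EL0, current EL with SP_ELx, lower EL using
 * AArch64, lower EL using AArch32), each group ordered
 * Synchronous/IRQ/FIQ/SError.  exception_vectors is installed into VBAR_EL1
 * during CPU bring-up.
 */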