/*-
 * Copyright (c) 2014 Andrew Turner
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "assym.inc"
#include "opt_kstack_pages.h"
#include "opt_sched.h"

#include <machine/asm.h>
#include <machine/armreg.h>
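
/*
 * If the PCB_SINGLE_STEP flag is set in \pcbflags, clear the MDSCR_EL1.SS
 * bit so hardware single stepping is disabled while we switch away from
 * the thread.
 */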
.macro clear_step_flag pcbflags, tmp
	tbz	\pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #MDSCR_SS
	msr	mdscr_el1, \tmp
	isb
999:
.endm

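/*
 * If the PCB_SINGLE_STEP flag is set in \pcbflags, set the MDSCR_EL1.SS bit
 * again so hardware single stepping resumes for the incoming thread.
 */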
.macro set_step_flag pcbflags, tmp
	tbz	\pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #MDSCR_SS
	msr	mdscr_el1, \tmp
	isb
999:
.endm

/*
 * void cpu_throw(struct thread *old, struct thread *new)
 *
 * Switch to the new thread without saving the old thread's context.
 * 'old' may be NULL when there is no previous thread to discard.
 */
ENTRY(cpu_throw)
	/* If old == NULL, skip disabling single stepping */
	cbz	x0, 1f

	/* If we were single stepping, disable it */
	ldr	x4, [x0, #TD_PCB]
	ldr	w5, [x4, #PCB_FLAGS]
	clear_step_flag w5, x6
1:

#ifdef VFP
	/* Backup the new thread pointer around a call to C code */
	mov	x19, x1
	bl	vfp_discard
	mov	x0, x19
#else
	mov	x0, x1
#endif

	/* This returns the thread pointer so no need to save it */
	bl	ptrauth_switch
#ifdef PERTHREAD_SSP
	mov	x19, x0
#endif
	/* This returns the thread pcb */
	bl	pmap_switch
	mov	x4, x0
#ifdef PERTHREAD_SSP
	/* Update the per-thread stack canary pointer. */
	add	x19, x19, #(TD_MD_CANARY)
	msr	sp_el0, x19
#endif

	/* If we are single stepping, enable it */
	ldr	w5, [x4, #PCB_FLAGS]
	set_step_flag w5, x6

	/* Restore the registers */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldr	x6, [x4, #PCB_TPIDRRO]
	msr	tpidrro_el0, x6
	ldp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	ldp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	ldp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	ldp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	ldp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	ldp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]

	ret
END(cpu_throw)

/*
 * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
 *
 * x0 = old
 * x1 = new
 * x2 = mtx
 * x3 to x7, x16 and x17 are caller saved
 */
ENTRY(cpu_switch)
	/*
	 * Save the old context.
	 */
	ldr	x4, [x0, #TD_PCB]

	/* Store the callee-saved registers */
	stp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	stp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	stp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	stp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	stp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	stp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]
	/* And the old stack pointer */
	mov	x5, sp
	mrs	x6, tpidrro_el0
	str	x6, [x4, #PCB_TPIDRRO]
	mrs	x6, tpidr_el0
	stp	x5, x6, [x4, #PCB_SP]

	/* If we were single stepping, disable it */
	ldr	w5, [x4, #PCB_FLAGS]
	clear_step_flag w5, x6

	mov	x19, x0
	mov	x20, x1
	mov	x21, x2

#ifdef VFP
	/* Load the pcb address */
	mov	x1, x4
	bl	vfp_save_state
	mov	x0, x20
#else
	mov	x0, x1
#endif

	/* This returns the thread pointer so no need to save it */
	bl	ptrauth_switch
	/* This returns the thread pcb */
	bl	pmap_switch
	/* Move the new pcb out of the way */
	mov	x4, x0

	mov	x2, x21
	mov	x1, x20
	mov	x0, x19
#ifdef PERTHREAD_SSP
	/* Update the per-thread stack canary pointer. */
	add	x20, x20, #(TD_MD_CANARY)
	msr	sp_el0, x20
#endif

	/*
	 * Release the old thread.
	 */
	stlr	x2, [x0, #TD_LOCK]
#if defined(SCHED_ULE) && defined(SMP)
	/* Spin if TD_LOCK points to a blocked_lock */
	ldr	x2, =_C_LABEL(blocked_lock)
1:
	ldar	x3, [x1, #TD_LOCK]
	cmp	x3, x2
	b.eq	1b
#endif

	/* If we are single stepping, enable it */
	ldr	w5, [x4, #PCB_FLAGS]
	set_step_flag w5, x6

	/* Restore the registers */
	ldp	x5, x6, [x4, #PCB_SP]
	mov	sp, x5
	msr	tpidr_el0, x6
	ldr	x6, [x4, #PCB_TPIDRRO]
	msr	tpidrro_el0, x6
	ldp	x19, x20, [x4, #PCB_REGS + (PCB_X19 + 0) * 8]
	ldp	x21, x22, [x4, #PCB_REGS + (PCB_X19 + 2) * 8]
	ldp	x23, x24, [x4, #PCB_REGS + (PCB_X19 + 4) * 8]
	ldp	x25, x26, [x4, #PCB_REGS + (PCB_X19 + 6) * 8]
	ldp	x27, x28, [x4, #PCB_REGS + (PCB_X19 + 8) * 8]
	ldp	x29, lr, [x4, #PCB_REGS + (PCB_X19 + 10) * 8]

	ret
END(cpu_switch)

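/*
 * The entry point for a newly created thread.  Call fork_exit() with the
 * callout function stashed in x19, its argument in x20 and the trap frame
 * on the stack, then return to userspace through that frame.
 */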
ENTRY(fork_trampoline)
	mov	x0, x19
	mov	x1, x20
	mov	x2, sp
	mov	fp, #0	/* Stack traceback stops here. */
	bl	_C_LABEL(fork_exit)

	/*
	 * Disable interrupts as we are setting userspace specific
	 * state that we won't handle correctly in an interrupt while
	 * in the kernel.
	 */
	msr	daifset, #(DAIF_D | DAIF_INTR)

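	/* x18 holds the per-CPU data pointer; fetch curthread for ptrauth */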
	ldr	x0, [x18, #PC_CURTHREAD]
	bl	ptrauth_enter_el0

	/* Restore sp, lr, elr, and spsr */
	ldp	x18, lr, [sp, #TF_SP]
	ldp	x10, x11, [sp, #TF_ELR]
	msr	sp_el0, x18
	msr	spsr_el1, x11
	msr	elr_el1, x10

	/* Restore the CPU registers */
	ldp	x0, x1, [sp, #TF_X + 0 * 8]
	ldp	x2, x3, [sp, #TF_X + 2 * 8]
	ldp	x4, x5, [sp, #TF_X + 4 * 8]
	ldp	x6, x7, [sp, #TF_X + 6 * 8]
	ldp	x8, x9, [sp, #TF_X + 8 * 8]
	ldp	x10, x11, [sp, #TF_X + 10 * 8]
	ldp	x12, x13, [sp, #TF_X + 12 * 8]
	ldp	x14, x15, [sp, #TF_X + 14 * 8]
	ldp	x16, x17, [sp, #TF_X + 16 * 8]
	ldp	x18, x19, [sp, #TF_X + 18 * 8]
	ldp	x20, x21, [sp, #TF_X + 20 * 8]
	ldp	x22, x23, [sp, #TF_X + 22 * 8]
	ldp	x24, x25, [sp, #TF_X + 24 * 8]
	ldp	x26, x27, [sp, #TF_X + 26 * 8]
	ldp	x28, x29, [sp, #TF_X + 28 * 8]

	/*
	 * There is no need to re-enable interrupts here; the ERET below
	 * restores PSR from the saved SPSR, which already holds the
	 * desired interrupt mask.
	 */
	ERET

END(fork_trampoline)

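/*
 * void savectx(struct pcb *pcb)
 *
 * Save the current thread's callee-saved registers, stack pointer, TLS
 * registers and (if enabled) VFP state into the given pcb.
 */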
ENTRY(savectx)
	/* Store the callee-saved registers */
	stp	x19, x20, [x0, #PCB_REGS + (PCB_X19 + 0) * 8]
	stp	x21, x22, [x0, #PCB_REGS + (PCB_X19 + 2) * 8]
	stp	x23, x24, [x0, #PCB_REGS + (PCB_X19 + 4) * 8]
	stp	x25, x26, [x0, #PCB_REGS + (PCB_X19 + 6) * 8]
	stp	x27, x28, [x0, #PCB_REGS + (PCB_X19 + 8) * 8]
	stp	x29, lr, [x0, #PCB_REGS + (PCB_X19 + 10) * 8]
	/* And the old stack pointer */
	mov	x5, sp
	mrs	x6, tpidrro_el0
	str	x6, [x0, #PCB_TPIDRRO]
	mrs	x6, tpidr_el0
	stp	x5, x6, [x0, #PCB_SP]

	/* Store the VFP registers */
#ifdef VFP
	mov	x28, lr
	bl	vfp_save_state_savectx
	mov	lr, x28
#endif

	ret
END(savectx)