/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/signal.h>
#include <sys/sysent.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/frame.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

#include <dev/psci/psci.h>

/*
 * psci.c is "default" in ARM64 kernel config files, and psci_reset will do
 * nothing until/unless the psci device probes/attaches.  Therefore, it is
 * safe to default the cpu_reset_hook to psci_reset.
 */
cpu_reset_hook_t cpu_reset_hook = psci_reset;

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;

	if ((flags & RFPROC) == 0)
		return;

	if (td1 == curthread) {
		/*
		 * Save the tpidr_el0 and the vfp state.  These are normally
		 * saved in cpu_switch, but if userland changes them and then
		 * forks, that may not have happened yet.
		 */
		td1->td_pcb->pcb_tpidr_el0 = READ_SPECIALREG(tpidr_el0);
		td1->td_pcb->pcb_tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
#ifdef VFP
		if ((td1->td_pcb->pcb_fpflags & PCB_FP_STARTED) != 0)
			vfp_save_state(td1, td1->td_pcb);
#endif
	}

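	/*
	 * The new thread's pcb lives at the top of its kernel stack; start
	 * it out as a copy of the parent's pcb.
	 */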
	pcb2 = (struct pcb *)(td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE) - 1;

	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Clear the debug register state. */
	bzero(&pcb2->pcb_dbg_regs, sizeof(pcb2->pcb_dbg_regs));

	ptrauth_fork(td2, td1);

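	/*
	 * Place the child's trapframe just below the pcb and copy the
	 * parent's frame into it.  Zero x0 and x1 so fork() returns 0 in the
	 * child, keeping only the AArch32 state bit and the DAIF interrupt
	 * masks from the parent's spsr.
	 */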
	tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
	bcopy(td1->td_frame, tf, sizeof(*tf));
	tf->tf_x[0] = 0;
	tf->tf_x[1] = 0;
	tf->tf_spsr = td1->td_frame->tf_spsr & (PSR_M_32 | PSR_DAIF);

	td2->td_frame = tf;

	/* Set the return value registers for fork() */
	td2->td_pcb->pcb_x[PCB_X19] = (uintptr_t)fork_return;
	td2->td_pcb->pcb_x[PCB_X20] = (uintptr_t)td2;
	td2->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
	td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame;

	vfp_new_thread(td2, td1, true);

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_daif = PSR_DAIF_DEFAULT;

#if defined(PERTHREAD_SSP)
	/* Set the new canary */
	arc4random_buf(&td2->td_md.md_canary, sizeof(td2->td_md.md_canary));
#endif
}

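/*
 * Reset the machine via the registered reset hook (PSCI by default).  If the
 * hook returns, the reset failed; report it and park the CPU in a WFI loop.
 */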
void
cpu_reset(void)
{

	cpu_reset_hook();

	printf("cpu_reset failed");
	while(1)
		__asm volatile("wfi" ::: "memory");
}

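/*
 * Nothing machine-dependent needs to be done when a thread's kernel stack
 * is swapped in or out.
 */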
void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

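/*
 * Report a system call's result to userland.  On success the return values
 * are passed in x0/x1 and the carry flag (PSR_C) is cleared; on failure the
 * carry flag is set and x0 holds the error number.  ERESTART backs the
 * exception return address up so the svc instruction is re-executed, and
 * EJUSTRETURN leaves the frame untouched.
 */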
void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *frame;

	frame = td->td_frame;

	if (__predict_true(error == 0)) {
		frame->tf_x[0] = td->td_retval[0];
		frame->tf_x[1] = td->td_retval[1];
		frame->tf_spsr &= ~PSR_C;	/* carry bit */
		return;
	}

	switch (error) {
	case ERESTART:
		frame->tf_elr -= 4;
		break;
	case EJUSTRETURN:
		break;
	default:
		frame->tf_spsr |= PSR_C;	/* carry bit */
		frame->tf_x[0] = error;
		break;
	}
}

/*
 * Initialize machine state, mostly pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to the fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));

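	/* Have the new thread enter fork_return(td) via fork_trampoline(). */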
	td->td_pcb->pcb_x[PCB_X19] = (uintptr_t)fork_return;
	td->td_pcb->pcb_x[PCB_X20] = (uintptr_t)td;
	td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
	td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;

	/* Update VFP state for the new thread */
	vfp_new_thread(td, td0, false);

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_daif = PSR_DAIF_DEFAULT;

#if defined(PERTHREAD_SSP)
	/* Set the new canary */
	arc4random_buf(&td->td_md.md_canary, sizeof(td->td_md.md_canary));
#endif

	/* Generate new pointer authentication keys. */
	ptrauth_copy_thread(td, td0);
}

/*
 * Set up the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
int
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

	/* 32-bit processes use r13 as the stack pointer */
	if (td->td_frame->tf_spsr & PSR_M_32) {
		tf->tf_x[13] = STACKALIGN((uintptr_t)stack->ss_sp +
		    stack->ss_size);
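		/* An entry address with the low bit set requests Thumb mode. */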
		if ((register_t)entry & 1)
			tf->tf_spsr |= PSR_T;
	} else
		tf->tf_sp = STACKALIGN((uintptr_t)stack->ss_sp +
		    stack->ss_size);
	tf->tf_elr = (register_t)entry;
	tf->tf_x[0] = (register_t)arg;
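	/* Clear the frame pointer and link register so unwinding stops here. */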
	tf->tf_x[29] = 0;
	tf->tf_lr = 0;
	return (0);
}

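/*
 * Install a new userspace TLS base address for the thread.  64-bit processes
 * read the TLS pointer from tpidr_el0; 32-bit compat processes read it from
 * tpidrro_el0, so both registers are set in that case.  If the thread is the
 * one currently running, also update the registers directly.
 */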
int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct pcb *pcb;

	if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS)
		return (EINVAL);

	pcb = td->td_pcb;
	if (td->td_frame->tf_spsr & PSR_M_32) {
		/* 32-bit ARM stores the user TLS in tpidrro_el0 */
		pcb->pcb_tpidrro_el0 = (register_t)tls_base;
		pcb->pcb_tpidr_el0 = (register_t)tls_base;
		if (td == curthread) {
			WRITE_SPECIALREG(tpidrro_el0, tls_base);
			WRITE_SPECIALREG(tpidr_el0, tls_base);
		}
	} else {
		pcb->pcb_tpidr_el0 = (register_t)tls_base;
		if (td == curthread)
			WRITE_SPECIALREG(tpidr_el0, tls_base);
	}

	return (0);
}

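/* Nothing machine-dependent to tear down when a thread exits. */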
void
cpu_thread_exit(struct thread *td)
{
}

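/*
 * Lay out the machine-dependent parts of a new thread: the pcb sits at the
 * top of the kernel stack, the initial trapframe is aligned immediately
 * below it, and ptrauth_thread_alloc() initializes the thread's pointer
 * authentication state.
 */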
void
cpu_thread_alloc(struct thread *td)
{

	td->td_pcb = (struct pcb *)(td->td_kstack +
	    td->td_kstack_pages * PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)STACKALIGN(
	    (struct trapframe *)td->td_pcb - 1);
	ptrauth_thread_alloc(td);
}

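/* Nothing to free; the pcb and trapframe are carved out of the kernel stack. */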
void
cpu_thread_free(struct thread *td)
{
}

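/* No machine-dependent thread state needs to be cleaned up here. */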
void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{

	td->td_pcb->pcb_x[PCB_X19] = (uintptr_t)func;
	td->td_pcb->pcb_x[PCB_X20] = (uintptr_t)arg;
}

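/* Nothing machine-dependent to do at process exit. */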
void
cpu_exit(struct thread *td)
{
}

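/* The existing vmspace can always be reused when a process execs. */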
bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{

	return (true);
}

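/* No machine-dependent procctl(2) commands are implemented; reject them all. */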
int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{

	return (EINVAL);
}

void
cpu_sync_core(void)
{
	/*
	 * Do nothing.  According to the Arm ARM (ARMv8), section D1.11
	 * "Exception return": if FEAT_ExS is not implemented, or if FEAT_ExS
	 * is implemented and the SCTLR_ELx.EOS field is set, an exception
	 * return from ELx is a context synchronization event.
	 */
}