xref: /freebsd/sys/arm64/arm64/vm_machdep.c (revision 1323ec57)
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/signal.h>
#include <sys/sysent.h>
#include <sys/unistd.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/armreg.h>
#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/frame.h>

#ifdef VFP
#include <machine/vfp.h>
#endif

#include <dev/psci/psci.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;

	if ((flags & RFPROC) == 0)
		return;

	if (td1 == curthread) {
		/*
		 * Save the tpidr_el0 and the VFP state.  This normally
		 * happens in cpu_switch(), but if userland changes these
		 * registers and then forks, that save may not have
		 * happened yet.
		 */
		td1->td_pcb->pcb_tpidr_el0 = READ_SPECIALREG(tpidr_el0);
		td1->td_pcb->pcb_tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
#ifdef VFP
		if ((td1->td_pcb->pcb_fpflags & PCB_FP_STARTED) != 0)
			vfp_save_state(td1, td1->td_pcb);
#endif
	}

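	/*
	 * The child's pcb sits at the top of its kernel stack with the
	 * trapframe just below it, matching the layout set up by
	 * cpu_thread_alloc().
	 */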
	pcb2 = (struct pcb *)(td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE) - 1;

	td2->td_pcb = pcb2;
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Clear the debug register state. */
	bzero(&pcb2->pcb_dbg_regs, sizeof(pcb2->pcb_dbg_regs));

	ptrauth_fork(td2, td1);

	tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
	bcopy(td1->td_frame, tf, sizeof(*tf));
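	/*
	 * fork() returns 0 in the child: clear the return registers and
	 * keep only the AArch32 and DAIF bits of the parent's spsr.
	 */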
	tf->tf_x[0] = 0;
	tf->tf_x[1] = 0;
	tf->tf_spsr = td1->td_frame->tf_spsr & (PSR_M_32 | PSR_DAIF);

	td2->td_frame = tf;

	/* Set up the child to enter fork_return() via fork_trampoline() */
	td2->td_pcb->pcb_x[8] = (uintptr_t)fork_return;
	td2->td_pcb->pcb_x[9] = (uintptr_t)td2;
	td2->td_pcb->pcb_lr = (uintptr_t)fork_trampoline;
	td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame;
	td2->td_pcb->pcb_fpusaved = &td2->td_pcb->pcb_fpustate;
	td2->td_pcb->pcb_vfpcpu = UINT_MAX;

	/* Set up to release the spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_daif = PSR_DAIF_DEFAULT;

#if defined(PERTHREAD_SSP)
	/* Set the new canary */
	arc4random_buf(&td2->td_md.md_canary, sizeof(td2->td_md.md_canary));
#endif
}

void
cpu_reset(void)
{

	psci_reset();

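	/* psci_reset() should not return; if it does, park the CPU below. */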
	printf("cpu_reset failed");
	while (1)
		__asm volatile("wfi" ::: "memory");
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *frame;

	frame = td->td_frame;

	if (__predict_true(error == 0)) {
		frame->tf_x[0] = td->td_retval[0];
		frame->tf_x[1] = td->td_retval[1];
		frame->tf_spsr &= ~PSR_C;	/* carry bit */
		return;
	}

	switch (error) {
	case ERESTART:
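		/* Rewind the ELR so the svc instruction is retried. */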
		frame->tf_elr -= 4;
		break;
	case EJUSTRETURN:
		break;
	default:
		frame->tf_spsr |= PSR_C;	/* carry bit */
		frame->tf_x[0] = error;
		break;
	}
}
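
/*
 * Note: a userland syscall stub is expected to test the carry bit after
 * the "svc" instruction.  A minimal (hypothetical) aarch64 stub would
 * look roughly like:
 *
 *	svc	0
 *	b.cs	cerror		// carry set: x0 holds the error number
 *	ret			// carry clear: x0/x1 hold the result
 */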

/*
 * Initialize machine state, mostly the pcb and trap frame for a new
 * thread, about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to fork_return(), which finalizes
 * the thread state and handles the peculiarities of the first return
 * to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{
	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));

	td->td_pcb->pcb_x[8] = (uintptr_t)fork_return;
	td->td_pcb->pcb_x[9] = (uintptr_t)td;
	td->td_pcb->pcb_lr = (uintptr_t)fork_trampoline;
	td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;
	td->td_pcb->pcb_fpflags &= ~(PCB_FP_STARTED | PCB_FP_KERN | PCB_FP_NOSAVE);
	td->td_pcb->pcb_fpusaved = &td->td_pcb->pcb_fpustate;
	td->td_pcb->pcb_vfpcpu = UINT_MAX;

	/* Set up to release the spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_daif = PSR_DAIF_DEFAULT;

#if defined(PERTHREAD_SSP)
	/* Set the new canary */
	arc4random_buf(&td->td_md.md_canary, sizeof(td->td_md.md_canary));
#endif

	/* Generate new pointer authentication keys. */
	ptrauth_copy_thread(td, td0);
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

	/* 32-bit processes use r13 for sp */
	if (td->td_frame->tf_spsr & PSR_M_32) {
		tf->tf_x[13] = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size);
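		/* A set low bit in the entry address selects Thumb mode. */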
		if ((register_t)entry & 1)
			tf->tf_spsr |= PSR_T;
	} else
		tf->tf_sp = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size);
	tf->tf_elr = (register_t)entry;
	tf->tf_x[0] = (register_t)arg;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{
	struct pcb *pcb;

	if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS)
		return (EINVAL);

	pcb = td->td_pcb;
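	/*
	 * Userland reads the TLS base directly from the thread ID registers
	 * (tpidr_el0, or the read-only tpidrro_el0 on 32-bit), so also
	 * update the hardware registers when td is the current thread.
	 */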
	if (td->td_frame->tf_spsr & PSR_M_32) {
		/* 32-bit ARM stores the user TLS in tpidrro */
		pcb->pcb_tpidrro_el0 = (register_t)tls_base;
		pcb->pcb_tpidr_el0 = (register_t)tls_base;
		if (td == curthread) {
			WRITE_SPECIALREG(tpidrro_el0, tls_base);
			WRITE_SPECIALREG(tpidr_el0, tls_base);
		}
	} else {
		pcb->pcb_tpidr_el0 = (register_t)tls_base;
		if (td == curthread)
			WRITE_SPECIALREG(tpidr_el0, tls_base);
	}

	return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{

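	/* Place the pcb at the stack top and the trapframe just below it. */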
	td->td_pcb = (struct pcb *)(td->td_kstack +
	    td->td_kstack_pages * PAGE_SIZE) - 1;
	td->td_frame = (struct trapframe *)STACKALIGN(
	    (struct trapframe *)td->td_pcb - 1);
	ptrauth_thread_alloc(td);
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{

	td->td_pcb->pcb_x[8] = (uintptr_t)func;
	td->td_pcb->pcb_x[9] = (uintptr_t)arg;
}

void
cpu_exit(struct thread *td)
{
}

bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{

	return (true);
}

int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{

	return (EINVAL);
}
313