/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socketvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/unistd.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/sysarch.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>
#include <machine/vfp.h>

/*
 * struct switchframe and trapframe must both be a multiple of 8
 * for correct stack alignment.
 */
_Static_assert((sizeof(struct switchframe) % 8) == 0, "Bad alignment");
_Static_assert((sizeof(struct trapframe) % 8) == 0, "Bad alignment");

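/*
 * Default FPSCR for new threads: VFP default-NaN and flush-to-zero
 * modes are enabled.
 */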
uint32_t initial_fpscr = VFPSCR_DN | VFPSCR_FZ;

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, and set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct mdproc *mdp2;

	if ((flags & RFPROC) == 0)
		return;

	/* Point the pcb to the top of the stack */
	pcb2 = (struct pcb *)
	    (td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef VFP
	/* Save the live VFP state so the pcb copy below captures it. */
	if (curthread == td1) {
		if ((td1->td_pcb->pcb_fpflags & PCB_FP_STARTED) != 0)
			vfp_save_state(td1, td1->td_pcb);
	}
#endif
	td2->td_pcb = pcb2;

	/* Clone td1's pcb */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Point to mdproc and then copy over td1's contents */
	mdp2 = &p2->p_md;
	bcopy(&td1->td_proc->p_md, mdp2, sizeof(*mdp2));

	/* Point the frame to the stack in front of pcb and copy td1's frame */
	td2->td_frame = (struct trapframe *)pcb2 - 1;
	*td2->td_frame = *td1->td_frame;

	/*
	 * Create a fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall.  This copies most of the user mode register values.
	 */
	pmap_set_pcb_pagedir(vmspace_pmap(p2->p_vmspace), pcb2);
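	/*
	 * Seed the switchframe so the child starts life in
	 * fork_trampoline(): cpu_switch() restores r4/r5/lr/sp from
	 * pcb_regs, and the trampoline passes fork_return (r4) and the
	 * new thread (r5) to fork_exit(), after which the thread returns
	 * to user mode through the trap frame.  sf_tpidrurw seeds the
	 * child's TLS pointer.
	 */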
	pcb2->pcb_regs.sf_r4 = (register_t)fork_return;
	pcb2->pcb_regs.sf_r5 = (register_t)td2;
	pcb2->pcb_regs.sf_lr = (register_t)fork_trampoline;
	pcb2->pcb_regs.sf_sp = STACKALIGN(td2->td_frame);
	pcb2->pcb_regs.sf_tpidrurw = (register_t)get_tls();

#ifdef VFP
	vfp_new_thread(td2, td1, true);
#endif

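	/*
	 * The child returns to user mode as if its own fork() call had
	 * succeeded: r0/r1 are zeroed and the carry (error) flag in the
	 * saved PSR is cleared.
	 */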
	tf = td2->td_frame;
	tf->tf_spsr &= ~PSR_C;
	tf->tf_r0 = 0;
	tf->tf_r1 = 0;

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_cspr = PSR_SVC32_MODE;
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_set_syscall_retval(struct thread *td, int error)
{
	struct trapframe *frame;

	frame = td->td_frame;
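	/*
	 * FreeBSD/arm syscall return convention: on success the carry
	 * flag in the saved PSR is clear and r0/r1 hold the return
	 * values; on error the carry flag is set and r0 holds the errno.
	 */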
	switch (error) {
	case 0:
		frame->tf_r0 = td->td_retval[0];
		frame->tf_r1 = td->td_retval[1];
		frame->tf_spsr &= ~PSR_C;   /* carry bit */
		break;
	case ERESTART:
		/*
		 * Reconstruct the pc to point at the swi.
		 */
#if __ARM_ARCH >= 7
		if ((frame->tf_spsr & PSR_T) != 0)
			frame->tf_pc -= THUMB_INSN_SIZE;
		else
#endif
			frame->tf_pc -= INSN_SIZE;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
		frame->tf_r0 = error;
		frame->tf_spsr |= PSR_C;    /* carry bit */
		break;
	}
}

/*
 * Initialize machine state, mostly the pcb and trap frame, for a new
 * thread about to return to userspace.  Put enough state in the new
 * thread's PCB to get it to go back to fork_return(), which
 * finalizes the thread state and handles peculiarities of the first
 * return to userspace for the new thread.
 */
void
cpu_copy_thread(struct thread *td, struct thread *td0)
{

	bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
	bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));

	td->td_pcb->pcb_regs.sf_r4 = (register_t)fork_return;
	td->td_pcb->pcb_regs.sf_r5 = (register_t)td;
	td->td_pcb->pcb_regs.sf_lr = (register_t)fork_trampoline;
	td->td_pcb->pcb_regs.sf_sp = STACKALIGN(td->td_frame);

	td->td_frame->tf_spsr &= ~PSR_C;
	td->td_frame->tf_r0 = 0;

#ifdef VFP
	vfp_new_thread(td, td0, false);
#endif

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_cspr = PSR_SVC32_MODE;
}

/*
 * Set the machine state for performing an upcall that starts
 * the entry function with the given argument.
 */
void
cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
	stack_t *stack)
{
	struct trapframe *tf = td->td_frame;

	tf->tf_usr_sp = STACKALIGN((int)stack->ss_sp + stack->ss_size);
	tf->tf_pc = (int)entry;
	tf->tf_r0 = (int)arg;
	tf->tf_spsr = PSR_USR32_MODE;
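	/*
	 * Per the ARM interworking convention, an odd entry address
	 * marks a Thumb-mode entry point, so set the T bit in the PSR.
	 */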
	if ((register_t)entry & 1)
		tf->tf_spsr |= PSR_T;
}

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

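	/*
	 * The TLS pointer lives in tpidrurw, the user read/write
	 * thread-ID register; update the saved copy in the pcb and, if
	 * this is the running thread, the register itself.
	 */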
	td->td_pcb->pcb_regs.sf_tpidrurw = (register_t)tls_base;
	if (td == curthread)
		set_tls(tls_base);
	return (0);
}

void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	td->td_pcb = (struct pcb *)(td->td_kstack + td->td_kstack_pages *
	    PAGE_SIZE) - 1;
	/*
	 * Ensure td_frame is aligned to an 8 byte boundary as it will be
	 * placed into the stack pointer which must be 8 byte aligned in
	 * the ARM EABI.
	 */
	td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb) - 1;
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
{
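	/*
	 * Overwrite the fork_return callout in the switchframe so that
	 * fork_exit() will call func(arg) instead.
	 */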
	td->td_pcb->pcb_regs.sf_r4 = (register_t)func;	/* function */
	td->td_pcb->pcb_regs.sf_r5 = (register_t)arg;	/* first arg */
}

void
cpu_exit(struct thread *td)
{
}

bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{

	return (true);
}

int
cpu_procctl(struct thread *td __unused, int idtype __unused, id_t id __unused,
    int com __unused, void *data __unused)
{

	return (EINVAL);
}