/*	$NetBSD: vm_machdep.c,v 1.18 2023/02/23 14:55:47 riastradh Exp $	*/

/*
 * Copyright (c) 2006 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 *
 * Author:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/* Comments on functions from alpha/vm_machdep.c */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>

#include <uvm/uvm_extern.h>

void lwp_trampoline(void);

void
cpu_lwp_free(struct lwp *l, int proc)
{

	/* XXX: Not yet. */
	(void)l;
	(void)proc;
}

void
cpu_lwp_free2(struct lwp *l)
{

	(void)l;
}

/*
 * The cpu_switchto() function saves the context of the LWP which is
 * currently running on the processor, and restores the context of the
 * LWP specified by newlwp; see cpu_switchto(9).  (A usage sketch
 * follows the function below.)
 */
lwp_t *
cpu_switchto(lwp_t *oldlwp, lwp_t *newlwp, bool returning)
{
	const struct lwp *l = curlwp;
	struct pcb *oldpcb = oldlwp ? lwp_getpcb(oldlwp) : NULL;
	struct pcb *newpcb = lwp_getpcb(newlwp);
	struct cpu_info *ci = curcpu();
	register uint64_t reg9 __asm("r9");

	KASSERT(newlwp != NULL);

	/*
	 * Issue barriers to coordinate mutex_exit on this CPU with
	 * mutex_vector_enter on another CPU.
	 *
	 * 1. Any prior mutex_exit by oldlwp must be visible to other
	 *    CPUs before we set ci_curlwp := newlwp on this one,
	 *    requiring a store-before-store barrier.
	 *
	 * 2. ci_curlwp := newlwp must be visible on all other CPUs
	 *    before any subsequent mutex_exit by newlwp can even test
	 *    whether there might be waiters, requiring a
	 *    store-before-load barrier.
	 *
	 * See kern_mutex.c for details -- this is necessary for
	 * adaptive mutexes to detect whether the lwp is on the CPU in
	 * order to safely block without requiring atomic r/m/w in
	 * mutex_exit.
	 */
	membar_producer();	/* store-before-store */
	ci->ci_curlwp = newlwp;
	membar_sync();		/* store-before-load */
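	/*
	 * For reference, the waiter side this pairs with looks roughly
	 * like the following (a simplified sketch; the real code is
	 * mutex_oncpu()/mutex_vector_enter() in kern_mutex.c):
	 *
	 *	l = owner of the mutex;
	 *	if (l->l_cpu->ci_curlwp == l)
	 *		continue;	// owner still on a CPU, keep spinning
	 *	// owner has been switched out; safe to block
	 */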

	/*
	 * lwp_trampoline() picks up the previous lwp from r9 for
	 * lwp_startup(), so copy oldlwp there ("mov r9=in0").
	 */
	__asm __volatile("mov %0=%1" : "=r"(reg9) : "r"(oldlwp));

	/* XXX handle RAS eventually */

	if (oldlwp == NULL) {
		restorectx(newpcb);
	} else {
		KASSERT(oldlwp == l);
		swapctx(oldpcb, newpcb);
	}

	/*
	 * Return the lwp that was running before us, now in r9, to the
	 * thread that originally called cpu_switchto().
	 */
	return ((lwp_t *)reg9);
}
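
/*
 * Usage sketch (simplified from the MI mi_switch() path in kern_synch.c,
 * not ia64-specific code):
 *
 *	prevlwp = cpu_switchto(l, newl, returning);
 *
 * Control comes back here only once 'l' is put back on a CPU; the
 * returned prevlwp then names the lwp that was running just before 'l'
 * resumed, which is why oldlwp is stashed in r9 across the switch.
 */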

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with p2 as an
 * argument.  This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 *
 * (A conceptual sketch of lwp_trampoline follows the function below.)
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	vaddr_t ua1 = uvm_lwp_getuarea(l1);
	vaddr_t ua2 = uvm_lwp_getuarea(l2);
	struct pcb *pcb1 = lwp_getpcb(l1);
	struct pcb *pcb2 = lwp_getpcb(l2);

	struct trapframe *tf;
	uint64_t ndirty;

	/*
	 * Save the preserved registers and the high FP registers in the
	 * PCB if we're the parent (i.e. l1 == curlwp) so that we have
	 * a valid PCB.  This also causes an RSE flush.  We don't have to
	 * do that otherwise, because there wouldn't be anything important
	 * to save.
	 *
	 * Copy the pcb from lwp l1 to l2.
	 */
	if (l1 == curlwp) {
		/* Sync the PCB before we copy it. */
		if (savectx(pcb1) != 0)
			panic("unexpected return from savectx()");
		/* ia64_highfp_save(td1); XXX */
	} else {
		KASSERT(l1 == &lwp0);
	}

	/*
	 * Create the child's kernel stack and backing store.  We basically
	 * create an image of the parent's stack and backing store and
	 * adjust where necessary.
	 */
	*pcb2 = *pcb1;

	l2->l_md.md_flags = l1->l_md.md_flags;
	l2->l_md.md_tf = (struct trapframe *)(ua2 + UAREA_TF_OFFSET);
	l2->l_md.md_astpending = 0;
	l2->l_md.user_stack = NULL;
	l2->l_md.user_stack_size = 0;

	/*
	 * Copy the trapframe.
	 */
	tf = l2->l_md.md_tf;
	*tf = *l1->l_md.md_tf;

	/* XXX need something like this, but still not correct */
	ndirty = tf->tf_special.ndirty + (tf->tf_special.bspstore & 0x1ffUL);
	memcpy((void *)(ua2 + UAREA_BSPSTORE_OFFSET),
	       (void *)(ua1 + UAREA_BSPSTORE_OFFSET), ndirty);

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL) {
		l2->l_md.user_stack = stack;
		l2->l_md.user_stack_size = stacksize;
		tf->tf_special.sp = (unsigned long)stack + UAREA_SP_OFFSET;
		tf->tf_special.bspstore = (unsigned long)stack + UAREA_BSPSTORE_OFFSET;

		memcpy(stack, (void *)(ua1 + UAREA_BSPSTORE_OFFSET), ndirty);
	}

	/* Set up the return values as expected by the fork() libc stub. */
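	/*
	 * (Sketch of the convention, assuming the usual ia64 syscall
	 * return path: r8/r9 carry the two syscall return values and
	 * r10 the error indicator, so the child sees "pid 0, is-child
	 * flag set, no error".  When PSR.is is set the process is
	 * running IA-32 code, which uses a different register mapping
	 * for its return values.)
	 */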
	if (tf->tf_special.psr & IA64_PSR_IS) {
		tf->tf_scratch.gr8 = 0;
		tf->tf_scratch.gr10 = 1;
	} else {
		tf->tf_scratch.gr8 = 0;
		tf->tf_scratch.gr9 = 1;
		tf->tf_scratch.gr10 = 0;
	}

	pcb2->pcb_special.bspstore = ua2 + UAREA_BSPSTORE_OFFSET + ndirty;
	pcb2->pcb_special.pfs = 0;
	pcb2->pcb_special.sp = ua2 + UAREA_SP_OFFSET;
	pcb2->pcb_special.rp = (unsigned long)FDESC_FUNC(lwp_trampoline);
	tf->tf_scratch.gr2 = (unsigned long)FDESC_FUNC(func);
	tf->tf_scratch.gr3 = (unsigned long)arg;

	return;
}
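
/*
 * Conceptual sketch (not the real implementation) of what the
 * lwp_trampoline() arranged above does when the child first runs:
 *
 *	void
 *	lwp_trampoline(void)
 *	{
 *		// r9 holds the previous lwp (stashed by cpu_switchto()),
 *		// gr2/gr3 hold func/arg (stashed by cpu_lwp_fork() above).
 *		lwp_startup(prev, curlwp);
 *		(*func)(arg);
 *		// func normally returns towards user level
 *	}
 */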

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 *
 * Not implemented on ia64 yet; a sketch modeled on other ports follows
 * the stub below.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	panic("XXX %s implement", __func__);
	return 0;
}
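
/*
 * Sketch of a possible vmapbuf() implementation, modeled on other
 * NetBSD ports (e.g. alpha/x86) rather than on ia64 code.  It assumes
 * that a phys_map submap has been set up for this purpose and that the
 * generic uvm/pmap interfaces behave as they do on those ports.
 */
#if 0
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	pmap_t upmap;

	KASSERT(bp->b_flags & B_PHYS);

	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);

	/* Enter the physical pages backing the user buffer into the kernel map. */
	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	while (len > 0) {
		if (!pmap_extract(upmap, faddr, &pa))
			panic("vmapbuf: unmapped user page");
		pmap_kenter_pa(taddr, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}
#endif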

/*
 * Unmap a previously-mapped user I/O request.
 *
 * Not implemented on ia64 yet; a sketch modeled on other ports follows
 * the stub below.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	panic("XXX %s implement", __func__);
	return;
}
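
/*
 * Matching vunmapbuf() sketch, again modeled on other ports and
 * assuming the same phys_map setup as the vmapbuf() sketch above.
 */
#if 0
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	KASSERT(bp->b_flags & B_PHYS);

	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);

	/* Remove the temporary kernel mappings and free the mapped VA. */
	pmap_kremove(addr, len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);

	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
#endif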