/* $NetBSD: vm_machdep.c,v 1.122 2021/12/05 07:53:57 msaitoh Exp $ */

/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.122 2021/12/05 07:53:57 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/core.h>
#include <sys/exec.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/alpha.h>
#include <machine/pmap.h>
#include <machine/reg.h>

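/*
 * The MD LWP teardown hooks are no-ops on alpha: there is no
 * machine-dependent per-LWP state to release beyond the uarea itself.
 */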
void
cpu_lwp_free(struct lwp *l, int proc)
{
	(void) l;
}

void
cpu_lwp_free2(struct lwp *l)
{
	(void) l;
}

/*
 * This is a backstop used to ensure that kernel threads never do
 * something silly like attempt to return to userspace.  We achieve
 * this by putting this at the root of their call graph instead of
 * exception_return().
 */
void
alpha_kthread_backstop(void)
{
	struct lwp * const l = curlwp;

	panic("kthread lwp %p (%s) hit the backstop", l, l->l_name);
}

/*
 * Finish a fork operation, with thread l2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * lwp_trampoline() and call child_return() with l2 as an
 * argument.  This causes the newly-created child thread to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * l1 is the thread being forked; if l1 == &lwp0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	extern void lwp_trampoline(void);

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

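	/*
	 * The child starts out sharing the parent's trap frame (it gets
	 * its own copy below), inherits only the FP_C (software
	 * floating-point control) bits of the parent's MD flags, and
	 * begins life with no AST pending.
	 */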
	l2->l_md.md_tf = l1->l_md.md_tf;
	l2->l_md.md_flags = l1->l_md.md_flags & MDLWP_FP_C;
	l2->l_md.md_astpending = 0;

	/*
	 * Cache the physical address of the pcb, so we can
	 * swap to it easily.
	 */
	l2->l_md.md_pcbpaddr = (void *)vtophys((vaddr_t)pcb2);

	/*
	 * Copy pcb and user stack pointer from proc p1 to p2.
	 * If specified, give the child a different stack.
	 * Floating point state from the FP chip has already been saved.
	 */
	*pcb2 = *pcb1;
	if (stack != NULL) {
		pcb2->pcb_hw.apcb_usp =
		    ((u_long)stack + stacksize) & ~((u_long)STACK_ALIGNBYTES);
	} else {
		pcb2->pcb_hw.apcb_usp = alpha_pal_rdusp();
	}

	/*
	 * Put l2 on the kernel's page tables until its first trip
	 * through pmap_activate().
	 */
	pcb2->pcb_hw.apcb_ptbr =
	    ALPHA_K0SEG_TO_PHYS((vaddr_t)kernel_lev1map) >> PGSHIFT;
	pcb2->pcb_hw.apcb_asn = PMAP_ASN_KERNEL;

#ifdef DIAGNOSTIC
	/*
	 * l1 != curlwp is legitimate only when l1 == &lwp0, i.e. when
	 * we are creating a kernel thread.
	 */
	if (l1 != curlwp && l1 != &lwp0)
		panic("cpu_lwp_fork: curlwp");
#endif

	/*
	 * Create the child's kernel stack from scratch.
	 */
	{
		struct trapframe *l2tf;
		uint64_t call_root;

		/*
		 * Pick a stack pointer, leaving room for a trapframe;
		 * copy trapframe from parent so return to user mode
		 * will be to right address, with correct registers.
		 */
		l2tf = l2->l_md.md_tf = (struct trapframe *)
		    (uvm_lwp_getuarea(l2) + USPACE - sizeof(struct trapframe));
		memcpy(l2->l_md.md_tf, l1->l_md.md_tf,
		    sizeof(struct trapframe));

		/*
		 * Set up return-value registers as the fork() libc stub
		 * expects.
		 */
		l2tf->tf_regs[FRAME_V0] = l1->l_proc->p_pid; /* parent's pid */
		l2tf->tf_regs[FRAME_A3] = 0;		/* no error */
		l2tf->tf_regs[FRAME_A4] = 1;		/* is child */

		/*
		 * Normal LWPs have their return address set to
		 * exception_return() so that they'll pop into
		 * user space.  But kernel threads don't have
		 * a user space, so we put a backstop in place
		 * just in case they try.
		 */
		if (__predict_true(l2->l_proc != &proc0))
			call_root = (uint64_t)exception_return;
		else
			call_root = (uint64_t)alpha_kthread_backstop;

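		/*
		 * Hand-off sketch (an assumption about the usual
		 * NetBSD/alpha locore behavior, not quoted from it):
		 * cpu_switchto() restores ra from pcb_context[7], so the
		 * child first resumes in lwp_trampoline(), which finishes
		 * the switch via lwp_startup() and then effectively does
		 *
		 *	ra = s1;	(call_root)
		 *	(*s0)(s2);	(func(arg))
		 *
		 * so that func's return lands in exception_return() for
		 * a normal LWP, or in alpha_kthread_backstop() for a
		 * kernel thread.
		 */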
		pcb2->pcb_hw.apcb_ksp =
		    (uint64_t)l2->l_md.md_tf;
		pcb2->pcb_context[0] =
		    (uint64_t)func;			/* s0: pc */
		pcb2->pcb_context[1] =
		    call_root;				/* s1: ra */
		pcb2->pcb_context[2] =
		    (uint64_t)arg;			/* s2: arg */
		pcb2->pcb_context[3] =
		    (uint64_t)l2;			/* s3: lwp */
		pcb2->pcb_context[7] =
		    (uint64_t)lwp_trampoline;		/* ra: assembly magic */
	}
}

/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	p = bp->b_proc;
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY|UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);
	len = atop(len);
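	/*
	 * Enter a wired kernel mapping for each physical page backing
	 * the user buffer.  uvm_vslock() has already wired the pages
	 * in the user pmap, so pmap_extract() must succeed.
	 */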
	while (len--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
		    &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));

	return 0;
}
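
/*
 * A sketch of the expected pairing (hypothetical caller, with `todo'
 * bytes of user buffer already attached to bp->b_data):
 *
 *	uvm_vslock(p->p_vmspace, bp->b_data, todo, rw);
 *	vmapbuf(bp, todo);
 *	...do the transfer via the kernel mapping...
 *	vunmapbuf(bp, todo);
 *	uvm_vsunlock(p->p_vmspace, bp->b_data, todo);
 */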

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_remove(vm_map_pmap(phys_map), addr, addr + len);
	pmap_update(vm_map_pmap(phys_map));
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

#ifdef __HAVE_CPU_UAREA_ROUTINES
static struct evcnt uarea_direct_success =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "uarea direct", "success");
static struct evcnt uarea_direct_failure =
    EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "uarea direct", "failure");

EVCNT_ATTACH_STATIC(uarea_direct_success);
EVCNT_ATTACH_STATIC(uarea_direct_failure);

void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, 0, ptoa(physmem), 0, 0, &pglist, 1, 1);
	if (error) {
		atomic_inc_ulong(&uarea_direct_failure.ev_count);
		return NULL;
	}
	atomic_inc_ulong(&uarea_direct_success.ev_count);

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * We need to return a direct-mapped VA for the pa.
	 */

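	/*
	 * On alpha, PMAP_MAP_POOLPAGE() is expected to yield a K0SEG
	 * (direct-mapped) address outside the pmap'd kernel VA range;
	 * cpu_uarea_free() below relies on that to recognize uareas
	 * allocated here.
	 */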
	return (void *)PMAP_MAP_POOLPAGE(pa);
}

/*
 * Return true if we freed it, false if we didn't.
 */
bool
cpu_uarea_free(void *vva)
{
	vaddr_t va = (vaddr_t) vva;
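	/*
	 * An address inside the mapped kernel VA range cannot have come
	 * from cpu_uarea_alloc() above (which returns direct-mapped
	 * K0SEG addresses), so decline and let the generic allocator
	 * free it.
	 */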
	if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS)
		return false;

	/*
	 * Since the pages are physically contiguous, the vm_page
	 * structures will be as well.
	 */
	struct vm_page *pg = PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va));
	KASSERT(pg != NULL);
	for (size_t i = 0; i < UPAGES; i++, pg++) {
		uvm_pagefree(pg);
	}
	return true;
}
#endif /* __HAVE_CPU_UAREA_ROUTINES */