/*	$OpenBSD: vm_machdep.c,v 1.39 2017/08/17 20:50:51 tom Exp $	*/
/*	$NetBSD: vm_machdep.c,v 1.38 2001/06/30 00:02:20 eeh Exp $	*/

/*
 * Copyright (c) 1996
 *	The President and Fellows of Harvard College. All rights reserved.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *	This product includes software developed by Harvard University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Harvard University.
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
36 * 37 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 38 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 39 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 40 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 41 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 42 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 43 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 44 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 45 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 46 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 47 * SUCH DAMAGE. 48 * 49 * @(#)vm_machdep.c 8.2 (Berkeley) 9/23/93 50 */ 51 52 #include <sys/param.h> 53 #include <sys/systm.h> 54 #include <sys/proc.h> 55 #include <sys/user.h> 56 #include <sys/malloc.h> 57 #include <sys/buf.h> 58 #include <sys/exec.h> 59 #include <sys/vnode.h> 60 #include <sys/signalvar.h> 61 62 #include <uvm/uvm_extern.h> 63 64 #include <machine/cpu.h> 65 #include <machine/frame.h> 66 #include <machine/trap.h> 67 #include <machine/bus.h> 68 69 #include <sparc64/sparc64/cache.h> 70 71 /* 72 * Map a user I/O request into kernel virtual address space. 73 * Note: the pages are already locked by uvm_vslock(), so we 74 * do not need to pass an access_type to pmap_enter(). 75 */ 76 void 77 vmapbuf(struct buf *bp, vsize_t len) 78 { 79 struct pmap *upmap, *kpmap; 80 vaddr_t uva; /* User VA (map from) */ 81 vaddr_t kva; /* Kernel VA (new to) */ 82 paddr_t pa; /* physical address */ 83 vsize_t off; 84 85 if ((bp->b_flags & B_PHYS) == 0) 86 panic("vmapbuf"); 87 88 /* 89 * XXX: It might be better to round/trunc to a 90 * segment boundary to avoid VAC problems! 
91 */ 92 bp->b_saveaddr = bp->b_data; 93 uva = trunc_page((vaddr_t)bp->b_data); 94 off = (vaddr_t)bp->b_data - uva; 95 len = round_page(off + len); 96 kva = uvm_km_valloc_prefer_wait(phys_map, len, uva); 97 bp->b_data = (caddr_t)(kva + off); 98 99 upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map); 100 kpmap = vm_map_pmap(kernel_map); 101 do { 102 if (pmap_extract(upmap, uva, &pa) == FALSE) 103 panic("vmapbuf: null page frame"); 104 /* Now map the page into kernel space. */ 105 pmap_enter(pmap_kernel(), kva, 106 pa /* | PMAP_NC */, 107 PROT_READ | PROT_WRITE, 108 PROT_READ | PROT_WRITE | PMAP_WIRED); 109 110 uva += PAGE_SIZE; 111 kva += PAGE_SIZE; 112 len -= PAGE_SIZE; 113 } while (len); 114 pmap_update(pmap_kernel()); 115 } 116 117 /* 118 * Unmap a previously-mapped user I/O request. 119 */ 120 void 121 vunmapbuf(struct buf *bp, vsize_t len) 122 { 123 vaddr_t kva; 124 vsize_t off; 125 126 if ((bp->b_flags & B_PHYS) == 0) 127 panic("vunmapbuf"); 128 129 kva = trunc_page((vaddr_t)bp->b_data); 130 off = (vaddr_t)bp->b_data - kva; 131 len = round_page(off + len); 132 133 pmap_remove(pmap_kernel(), kva, kva + len); 134 pmap_update(pmap_kernel()); 135 uvm_km_free_wakeup(phys_map, kva, len); 136 bp->b_data = bp->b_saveaddr; 137 bp->b_saveaddr = NULL; 138 } 139 140 141 /* 142 * The offset of the topmost frame in the kernel stack. 143 */ 144 #define TOPFRAMEOFF (USPACE-sizeof(struct trapframe)-CC64FSZ) 145 #define STACK_OFFSET BIAS 146 147 #ifdef DEBUG 148 char cpu_forkname[] = "cpu_fork()"; 149 #endif 150 151 /* 152 * Finish a fork operation, with process p2 nearly set up. 153 * Copy and update the pcb and trap frame, making the child ready to run. 154 * 155 * Rig the child's kernel stack so that it will start out in 156 * proc_trampoline() and call 'func' with 'arg' as an argument. 
 * For normal processes this is child_return(), which causes the
 * child to go directly to user level with an apparent return value
 * of 0 from fork(), while the parent process returns normally.
 * For kernel threads this will be a function that never returns.
 *
 * An alternate user-level stack or TCB can be requested by passing
 * a non-NULL value; these are poked into the PCB so they're in
 * effect at the initial return to userspace.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, void *tcb,
    void (*func)(void *), void *arg)
{
	struct pcb *opcb = &p1->p_addr->u_pcb;	/* parent's pcb */
	struct pcb *npcb = &p2->p_addr->u_pcb;	/* child's pcb */
	struct trapframe *tf2;
	struct rwindow *rp;
	extern struct proc proc0;

	/*
	 * Save all user registers to p1's stack or, in the case of
	 * user registers and invalid stack pointers, to opcb.
	 * We then copy the whole pcb to p2; when switch() selects p2
	 * to run, it will run at the `proc_trampoline' stub, rather
	 * than returning at the copying code below.
	 *
	 * If process p1 has an FPU state, we must copy it.  If it is
	 * the FPU user, we must save the FPU state first.
	 */

#ifdef NOTDEF_DEBUG
	printf("cpu_fork()\n");
#endif
	if (p1 == curproc) {
		/* Flush outstanding register windows to the stack/pcb. */
		write_user_windows();

		/*
		 * We're in the kernel, so we don't really care about
		 * %ccr or %asi.  We do want to duplicate %pstate and %cwp.
		 */
		opcb->pcb_pstate = getpstate();
		opcb->pcb_cwp = getcwp();
	}
#ifdef DIAGNOSTIC
	/* Only proc0 may fork on behalf of another process. */
	else if (p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
#ifdef DEBUG
	/* prevent us from having NULL lastcall */
	opcb->lastcall = cpu_forkname;
#else
	opcb->lastcall = NULL;
#endif
	/* Duplicate the parent's pcb wholesale into the child. */
	bcopy((caddr_t)opcb, (caddr_t)npcb, sizeof(struct pcb));
	if (p1->p_md.md_fpstate) {
		/* Parent owns live FPU state: save it, then copy it. */
		fpusave_proc(p1, 1);
		p2->p_md.md_fpstate = malloc(sizeof(struct fpstate64),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_md.md_fpstate, p2->p_md.md_fpstate,
		    sizeof(struct fpstate64));
	} else
		p2->p_md.md_fpstate = NULL;

	/*
	 * Setup (kernel) stack frame that will by-pass the child
	 * out of the kernel. (The trap frame invariably resides at
	 * the tippity-top of the u. area.)
	 */
	tf2 = p2->p_md.md_tf = (struct trapframe *)
	    ((long)npcb + USPACE - sizeof(*tf2));

	/* Copy parent's trapframe */
	*tf2 = *(struct trapframe *)((long)opcb + USPACE - sizeof(*tf2));

	/*
	 * If specified, give the child a different stack, offset and
	 * with space reserved for the frame, and zero the frame pointer.
	 */
	if (stack != NULL) {
		tf2->tf_out[6] = (u_int64_t)(u_long)stack - (BIAS + CC64FSZ);
		tf2->tf_in[6] = 0;
	}
	if (tcb != NULL)
		tf2->tf_global[7] = (u_int64_t)tcb;	/* %g7 carries the TCB */

	/* Construct kernel frame to return to in cpu_switch() */
	rp = (struct rwindow *)((u_long)npcb + TOPFRAMEOFF);
	*rp = *(struct rwindow *)((u_long)opcb + TOPFRAMEOFF);
	rp->rw_local[0] = (long)func;	/* Function to call */
	rp->rw_local[1] = (long)arg;	/* and its argument */

	/*
	 * NOTE(review): pcb_pc is biased by -8; presumably the resume
	 * path advances the saved PC by 8 (call + delay slot) so the
	 * child starts exactly at proc_trampoline -- confirm in locore.s.
	 * pcb_sp carries the 64-bit stack BIAS (STACK_OFFSET).
	 */
	npcb->pcb_pc = (long)proc_trampoline - 8;
	npcb->pcb_sp = (long)rp - STACK_OFFSET;

	/* Need to create a %tstate if we're forking from proc0. */
	if (p1 == &proc0)
		tf2->tf_tstate =
		    ((u_int64_t)ASI_PRIMARY_NO_FAULT << TSTATE_ASI_SHIFT) |
		    ((PSTATE_USER) << TSTATE_PSTATE_SHIFT);
	else
		/* Clear condition codes and disable FPU. */
		tf2->tf_tstate &=
		    ~((PSTATE_PEF << TSTATE_PSTATE_SHIFT) | TSTATE_CCR);

#ifdef NOTDEF_DEBUG
	printf("cpu_fork: Copying over trapframe: otf=%p ntf=%p sp=%p opcb=%p npcb=%p\n",
	    (struct trapframe *)((char *)opcb + USPACE - sizeof(*tf2)), tf2, rp, opcb, npcb);
	printf("cpu_fork: tstate=%lx pc=%lx npc=%lx rsp=%lx\n",
	    (long)tf2->tf_tstate, (long)tf2->tf_pc, (long)tf2->tf_npc,
	    (long)(tf2->tf_out[6]));
	db_enter();
#endif
}

/*
 * These are the "function" entry points in locore.s to handle IPI's.
 */
void	ipi_save_fpstate(void);
void	ipi_drop_fpstate(void);

/*
 * Save (save != 0) or discard the FPU state currently held by ci.
 * Must run on ci itself (asserted below).  Clears ci_fpproc so the
 * FPU is marked free afterwards; no-op if no process owns the FPU.
 */
void
fpusave_cpu(struct cpu_info *ci, int save)
{
	struct proc *p;

	KDASSERT(ci == curcpu());

	p = ci->ci_fpproc;
	if (p == NULL)
		return;

	if (save)
		savefpstate(p->p_md.md_fpstate);
	else
		clearfpstate();

	ci->ci_fpproc = NULL;
}

/*
 * Save or discard p's live FPU state, wherever it resides.  On MP,
 * if another cpu holds it, ask that cpu via IPI and wait for it to
 * let go; on UP only the local cpu can hold it.
 */
void
fpusave_proc(struct proc *p, int save)
{
	struct cpu_info *ci = curcpu();

#ifdef MULTIPROCESSOR
	if (p == ci->ci_fpproc) {
		/* State is local: keep interrupts off while flushing. */
		u_int64_t s = intr_disable();
		fpusave_cpu(ci, save);
		intr_restore(s);
		return;
	}

	/* Find the remote cpu holding p's FPU state, if any. */
	for (ci = cpus; ci != NULL; ci = ci->ci_next) {
		if (ci == curcpu())
			continue;
		if (ci->ci_fpproc != p)
			continue;
		sparc64_send_ipi(ci->ci_itid,
		    save ? ipi_save_fpstate : ipi_drop_fpstate, (vaddr_t)p, 0);
		/* Spin until the target cpu releases the state. */
		while(ci->ci_fpproc == p)
			membar_sync();
		break;
	}
#else
	if (p == ci->ci_fpproc)
		fpusave_cpu(ci, save);
#endif
}

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up a little and then call sched_exit() with the old proc
 * as an argument.
 * sched_exit() schedules the old vmspace and stack
 * to be freed, then selects a new process to run.
 */
void
cpu_exit(struct proc *p)
{
	/* Drop (don't save) any FPU state and free its backing store. */
	if (p->p_md.md_fpstate != NULL) {
		fpusave_proc(p, 0);
		free(p->p_md.md_fpstate, M_SUBPROC, sizeof(struct fpstate64));
	}

	/* Detach the pmap before the final switch away; never returns. */
	pmap_deactivate(p);
	sched_exit(p);
}