1 /* $NetBSD: vm_machdep.c,v 1.92 2002/11/12 14:00:42 nisimura Exp $ */ 2 3 /* 4 * Copyright (c) 1988 University of Utah. 5 * Copyright (c) 1992, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * the Systems Programming Group of the University of Utah Computer 10 * Science Department and Ralph Campbell. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. All advertising materials mentioning features or use of this software 21 * must display the following acknowledgement: 22 * This product includes software developed by the University of 23 * California, Berkeley and its contributors. 24 * 4. Neither the name of the University nor the names of its contributors 25 * may be used to endorse or promote products derived from this software 26 * without specific prior written permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * from: Utah Hdr: vm_machdep.c 1.21 91/04/06 41 * 42 * @(#)vm_machdep.c 8.3 (Berkeley) 1/4/94 43 */ 44 45 #include "opt_ddb.h" 46 47 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ 48 __KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.92 2002/11/12 14:00:42 nisimura Exp $"); 49 50 #include <sys/param.h> 51 #include <sys/systm.h> 52 #include <sys/proc.h> 53 #include <sys/malloc.h> 54 #include <sys/buf.h> 55 #include <sys/vnode.h> 56 #include <sys/user.h> 57 #include <sys/core.h> 58 #include <sys/exec.h> 59 60 #include <uvm/uvm_extern.h> 61 62 #include <mips/cache.h> 63 #include <mips/regnum.h> 64 #include <mips/locore.h> 65 #include <mips/pte.h> 66 #include <mips/psl.h> 67 #include <machine/cpu.h> 68 69 paddr_t kvtophys(vaddr_t); /* XXX */ 70 71 /* 72 * Finish a fork operation, with process p2 nearly set up. 73 * Copy and update the pcb and trap frame, making the child ready to run. 74 * 75 * Rig the child's kernel stack so that it will start out in 76 * proc_trampoline() and call child_return() with p2 as an 77 * argument. This causes the newly-created child process to go 78 * directly to user level with an apparent return value of 0 from 79 * fork(), while the parent process returns normally. 80 * 81 * p1 is the process being forked; if p1 == &proc0, we are creating 82 * a kernel thread, and the return path and argument are specified with 83 * `func' and `arg'. 
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_fork(p1, p2, stack, stacksize, func, arg)
	struct proc *p1, *p2;
	void *stack;
	size_t stacksize;
	void (*func)(void *);
	void *arg;
{
	struct pcb *pcb;
	struct frame *f;
	pt_entry_t *pte;
	int i, x;

#ifdef MIPS3_PLUS
	/*
	 * To eliminate virtual aliases created by pmap_zero_page(),
	 * this cache flush operation is necessary.
	 * VCED on kernel stack is not allowed.
	 * XXXJRT Confirm that this is necessary, and/or fix
	 * XXXJRT pmap_zero_page().
	 */
	if (CPUISMIPS3 && mips_sdcache_line_size)
		mips_dcache_wbinv_range((vaddr_t) p2->p_addr, USPACE);
#endif

#ifdef DIAGNOSTIC
	/*
	 * If p1 != curproc && p1 == &proc0, we're creating a kernel thread.
	 */
	if (p1 != curproc && p1 != &proc0)
		panic("cpu_fork: curproc");
#endif
	/*
	 * If the parent has live FP state on the FPU, spill it into the
	 * pcb first so the memcpy below captures current register values.
	 */
	if ((p1->p_md.md_flags & MDP_FPUSED) && p1 == fpcurproc)
		savefpregs(p1);

	/*
	 * Copy pcb from proc p1 to p2.
	 * Copy p1 trapframe atop on p2 stack space, so return to user mode
	 * will be to right address, with correct registers.
	 */
	memcpy(&p2->p_addr->u_pcb, &p1->p_addr->u_pcb, sizeof(struct pcb));
	/* Trap frame lives at the very top of the child's kernel stack. */
	f = (struct frame *)((caddr_t)p2->p_addr + USPACE) - 1;
	memcpy(f, p1->p_md.md_regs, sizeof(struct frame));

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		f->f_regs[SP] = (u_int)stack + stacksize;

	p2->p_md.md_regs = (void *)f;
	/* Child inherits only the FP-used flag from the parent. */
	p2->p_md.md_flags = p1->p_md.md_flags & MDP_FPUSED;
	/*
	 * Cache the u-area PTEs in md_upte[] with the per-MMU attribute
	 * bits masked off, so cpu_switch() can wire them quickly.
	 */
	x = (MIPS_HAS_R4K_MMU) ?
	    (MIPS3_PG_G|MIPS3_PG_RO|MIPS3_PG_WIRED) : MIPS1_PG_G;
	pte = kvtopte(p2->p_addr);
	for (i = 0; i < UPAGES; i++)
		p2->p_md.md_upte[i] = pte[i].pt_entry &~ x;

	/*
	 * Arrange for the child to start in proc_trampoline() with
	 * func/arg in the callee-saved slots of its switch context.
	 */
	pcb = &p2->p_addr->u_pcb;
	pcb->pcb_context[10] = (int)proc_trampoline;	/* RA */
	pcb->pcb_context[8] = (int)f;			/* SP */
	pcb->pcb_context[0] = (int)func;		/* S0 */
	pcb->pcb_context[1] = (int)arg;			/* S1 */
	pcb->pcb_context[11] |= PSL_LOWIPL;		/* SR */
#ifdef IPL_ICU_MASK
	pcb->pcb_ppl = 0;	/* machine dependent interrupt mask */
#endif
}

/*
 * Finish a swapin operation.
 * We needed to update the cached PTEs for the user area in the
 * machine dependent part of the proc structure.
 */
void
cpu_swapin(p)
	struct proc *p;
{
	pt_entry_t *pte;
	int i, x;

	/*
	 * Cache the PTEs for the user area in the machine dependent
	 * part of the proc struct so cpu_switch() can quickly map in
	 * the user struct and kernel stack.
	 * (Same attribute-mask computation as cpu_fork() above.)
	 */
	x = (MIPS_HAS_R4K_MMU) ?
	    (MIPS3_PG_G|MIPS3_PG_RO|MIPS3_PG_WIRED) : MIPS1_PG_G;
	pte = kvtopte(p->p_addr);
	for (i = 0; i < UPAGES; i++)
		p->p_md.md_upte[i] = pte[i].pt_entry &~ x;
}

/*
 * cpu_exit is called as the last action during exit.
 *
 * We clean up a little and then call switch_exit() with the old proc as an
 * argument.  switch_exit() first switches to proc0's PCB and stack,
 * schedules the dead proc's vmspace and stack to be freed, then jumps
 * into the middle of cpu_switch(), as if it were switching from proc0.
 */
void
cpu_exit(p)
	struct proc *p;
{
	void switch_exit(struct proc *);

	/* Release FPU ownership; the dying proc's FP state is discarded. */
	if ((p->p_md.md_flags & MDP_FPUSED) && p == fpcurproc)
		fpcurproc = (struct proc *)0;

	uvmexp.swtch++;
	(void)splhigh();	/* block interrupts; we never return */
	switch_exit(p);
	/* NOTREACHED */
}

/*
 * Dump the machine specific segment at the start of a core dump.
 */
int
cpu_coredump(p, vp, cred, chdr)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
	struct core *chdr;
{
	int error;
	struct coreseg cseg;
	/* CPU state snapshot written out as the core's machine segment. */
	struct cpustate {
		struct frame frame;
		struct fpreg fpregs;
	} cpustate;

	/* Fill in the core header: sizes of header, seg header, CPU seg. */
	CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
	chdr->c_hdrsize = ALIGN(sizeof(struct core));
	chdr->c_seghdrsize = ALIGN(sizeof(struct coreseg));
	chdr->c_cpusize = sizeof(struct cpustate);

	/* Spill live FP registers into the pcb before snapshotting them. */
	if ((p->p_md.md_flags & MDP_FPUSED) && p == fpcurproc)
		savefpregs(p);
	cpustate.frame = *(struct frame *)p->p_md.md_regs;
	cpustate.fpregs = p->p_addr->u_pcb.pcb_fpregs;

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;
	/* Segment header goes immediately after the core header. */
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cseg, chdr->c_seghdrsize,
	    (off_t)chdr->c_hdrsize, UIO_SYSSPACE,
	    IO_NODELOCKED|IO_UNIT, cred, NULL, p);
	if (error)
		return error;

	/* Then the CPU state itself. */
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)&cpustate,
	    (off_t)chdr->c_cpusize,
	    (off_t)(chdr->c_hdrsize + chdr->c_seghdrsize),
	    UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT,
	    cred, NULL, p);

	/* Only count the segment once both writes succeeded. */
	if (!error)
		chdr->c_nseg++;

	return error;
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of NBPG.
 */
void
pagemove(from, to, size)
	caddr_t from, to;
	size_t size;
{
	pt_entry_t *fpte, *tpte;
	paddr_t invalid;

	if (size % NBPG)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
#ifdef MIPS3_PLUS
	/*
	 * If source and destination fall on different cache indices,
	 * write back and invalidate the source range to avoid leaving
	 * a virtual alias in the (virtually indexed) data cache.
	 */
	if (CPUISMIPS3 &&
	    (mips_cache_indexof(from) != mips_cache_indexof(to)))
		mips_dcache_wbinv_range((vaddr_t) from, size);
#endif
	invalid = (MIPS_HAS_R4K_MMU) ?
	    MIPS3_PG_NV | MIPS3_PG_G : MIPS1_PG_NV;
	/*
	 * Move each PTE from the source slot to the destination slot,
	 * invalidate the source, and flush both VAs from the TLB.
	 * NOTE(review): NBPG and PAGE_SIZE are used interchangeably in
	 * this loop -- presumably equal on this port; confirm.
	 */
	while (size > 0) {
		tpte->pt_entry = fpte->pt_entry;
		fpte->pt_entry = invalid;
		MIPS_TBIS((vaddr_t)from);
		MIPS_TBIS((vaddr_t)to);
		fpte++; tpte++;
		size -= PAGE_SIZE;
		from += PAGE_SIZE;
		to += NBPG;
	}
}

/*
 * Map a user I/O request into kernel virtual address space.
 */
void
vmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	struct pmap *upmap;
	vaddr_t uva;	/* User VA (map from) */
	vaddr_t kva;	/* Kernel VA (new to) */
	paddr_t pa;	/* physical address */
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	/*
	 * Round the user range out to whole pages; stash the original
	 * b_data in b_saveaddr so vunmapbuf() can restore it.
	 */
	uva = mips_trunc_page(bp->b_saveaddr = bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = mips_round_page(off + len);
	/* Prefer a kva with the same cache color as uva (may sleep). */
	kva = uvm_km_valloc_prefer_wait(phys_map, len, uva);
	bp->b_data = (caddr_t)(kva + off);

	/* Wire each user page into phys_map at the new kernel VA. */
	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	do {
		if (pmap_extract(upmap, uva, &pa) == FALSE)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, pa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(vm_map_pmap(phys_map));
}

/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(bp, len)
	struct buf *bp;
	vsize_t len;
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = mips_trunc_page(bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = mips_round_page(off + len);
	pmap_remove(vm_map_pmap(phys_map), kva, kva + len);
	/*
	 * NOTE(review): vmapbuf() updates vm_map_pmap(phys_map) but this
	 * side updates pmap_kernel() -- presumably the same pmap here;
	 * confirm the asymmetry is intentional.
	 */
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, kva, len);
	/* Restore the caller's original buffer address. */
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

/*
 * Map a (kernel) virtual address to a physical address.
348 * 349 * MIPS processor has 3 distinct kernel address ranges: 350 * 351 * - kseg0 kernel "virtual address" for the cached physical address space. 352 * - kseg1 kernel "virtual address" for the uncached physical address space. 353 * - kseg2 normal kernel "virtual address" mapped via the TLB. 354 */ 355 paddr_t 356 kvtophys(kva) 357 vaddr_t kva; 358 { 359 pt_entry_t *pte; 360 paddr_t phys; 361 362 if (kva >= MIPS_KSEG2_START) { 363 if (kva >= VM_MAX_KERNEL_ADDRESS) 364 goto overrun; 365 366 pte = kvtopte(kva); 367 if ((size_t) (pte - Sysmap) > Sysmapsize) { 368 printf("oops: Sysmap overrun, max %d index %d\n", 369 Sysmapsize, pte - Sysmap); 370 } 371 if (!mips_pg_v(pte->pt_entry)) { 372 printf("kvtophys: pte not valid for %lx\n", kva); 373 } 374 phys = mips_tlbpfn_to_paddr(pte->pt_entry) | (kva & PGOFSET); 375 return phys; 376 } 377 if (kva >= MIPS_KSEG1_START) 378 return MIPS_KSEG1_TO_PHYS(kva); 379 380 if (kva >= MIPS_KSEG0_START) 381 return MIPS_KSEG0_TO_PHYS(kva); 382 383 overrun: 384 printf("Virtual address %lx: cannot map to physical\n", kva); 385 #ifdef DDB 386 Debugger(); 387 return 0; /* XXX */ 388 #endif 389 panic("kvtophys"); 390 } 391