/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	8.5 (Berkeley) 01/04/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <hp300/hp300/pte.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;
	extern caddr_t getsp();
	extern char kstack[];

	p2->p_md.md_regs = p1->p_md.md_regs;
	p2->p_md.md_flags = (p1->p_md.md_flags & ~(MDP_AST|MDP_HPUXTRACE));

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * switch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = getsp() - kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);

	PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &up->u_pcb, 0);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero,
	 * setjmp-style, from savectx.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}
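
/*
 * Caller-side sketch (illustrative only, not part of this file):
 * the machine-independent fork code is assumed to consume the dual
 * return value roughly as
 *
 *	if (cpu_fork(p1, p2))
 *		return (0);	child resumes here via savectx/switch
 *	...parent continues fork processing...
 *
 * so any code placed after the cpu_fork() call runs only in the
 * parent.
 */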

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call switch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until switch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{

	vmspace_free(p->p_vmspace);

	(void) splimp();
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	switch_exit();
	/* NOTREACHED */
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
#ifdef HPUXCOMPAT
	/*
	 * If we loaded from an HP-UX format binary file, we dump enough
	 * of an HP-UX style user struct so that the HP-UX debuggers can
	 * grok it.
	 */
	if (p->p_md.md_flags & MDP_HPUX)
		return (hpuxdumpu(vp, cred));
#endif
	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) NULL,
	    p));
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = PG_NV;
		TBIS(from);
		TBIS(to);
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
	DCIS();
}

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	register int size, prot;
{
	register struct pte *pte;
	register u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*(int *)pte++ = PG_V | prot | page;
		page += NBPG;
	}
	TBIAS();
}
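
/*
 * Usage sketch (hypothetical device-attach code; `regva' and `regpa'
 * are illustrative names): map one page of device registers
 * read/write and cache-inhibited, then tear the mapping down with
 * physunaccess() below:
 *
 *	physaccess(regva, regpa, NBPG, PG_RW|PG_CI);
 *	...access the registers through regva...
 *	physunaccess(regva, NBPG);
 *
 * PG_RW and PG_CI are the hp300 PTE protection and cache-inhibit
 * bits from pte.h.
 */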

physunaccess(vaddr, size)
	caddr_t vaddr;
	register int size;
{
	register struct pte *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*(int *)pte++ = PG_NV;
	TBIAS();
}

/*
 * Set a red zone in the kernel stack after the u. area.
 * We don't support a redzone right now.  It really isn't clear
 * that it is a good idea since, if the kernel stack were to roll
 * into a write protected page, the processor would lock up (since
 * it cannot create an exception frame) and we would get no useful
 * post-mortem info.  Currently, under the DEBUG option, we just
 * check at every clock interrupt to see if the current k-stack has
 * gone too far (i.e. into the "redzone" page) and if so, panic.
 * Look at _lev6intr in locore.s for more details.
 */
/*ARGSUSED*/
setredzone(pte, vaddr)
	struct pte *pte;
	caddr_t vaddr;
{
}

/*
 * Convert kernel VA to physical address.
 */
kvtop(addr)
	register caddr_t addr;
{
	vm_offset_t pa;

	pa = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (pa == 0)
		panic("kvtop: zero page frame");
	return((int)pa);
}

extern vm_map_t phys_map;

/*
 * Map an I/O request into kernel virtual address space.
 *
 * XXX we allocate KVA space by using kmem_alloc_wait which we know
 * allocates space without backing physical memory.  This implementation
 * is a total crock; the multiple mappings of these physical pages should
 * be reflected in the higher-level VM structures to avoid problems.
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_data = (caddr_t)(kva + off);
	while (npf--) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the I/O map PTEs associated with this I/O operation.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	register int npf;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = bp->b_data;
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
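
/*
 * Life-cycle sketch for the pair above (as driven by physio() for
 * raw device I/O; illustrative only):
 *
 *	vmapbuf(bp);		double-map the user pages into phys_map;
 *				b_data now points at the kernel alias
 *	(*strategy)(bp);	device transfers through bp->b_data
 *	biowait(bp);
 *	vunmapbuf(bp);		release the alias, restore b_data
 */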

#ifdef MAPPEDCOPY
u_int mappedcopysize = 4096;

mappedcopyin(fromp, top, count)
	register char *fromp, *top;
	register int count;
{
	register vm_offset_t kva, upa;
	register int off, len;
	int alignable;
	pmap_t upmap;
	extern caddr_t CADDR1;

	kva = (vm_offset_t) CADDR1;
	off = (vm_offset_t)fromp & PAGE_MASK;
	/* source and destination share a page offset, so whole pages align */
	alignable = (off == ((vm_offset_t)top & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * On the first access of a page, use fubyte to make sure
		 * the page is faulted in and read access is allowed.
		 */
		if (fubyte(fromp) == -1)
			return (EFAULT);
		/*
		 * Map in the page and bcopy the data from it.
		 */
		upa = pmap_extract(upmap, trunc_page(fromp));
		if (upa == 0)
			panic("mappedcopyin");
		len = min(count, PAGE_SIZE-off);
		pmap_enter(kernel_pmap, kva, upa, VM_PROT_READ, TRUE);
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(kva, top);
		else
			bcopy((caddr_t)(kva+off), top, len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
	return (0);
}

mappedcopyout(fromp, top, count)
	register char *fromp, *top;
	register int count;
{
	register vm_offset_t kva, upa;
	register int off, len;
	int alignable;
	pmap_t upmap;
	extern caddr_t CADDR2;

	kva = (vm_offset_t) CADDR2;
	off = (vm_offset_t)top & PAGE_MASK;
	/* source and destination share a page offset, so whole pages align */
	alignable = (off == ((vm_offset_t)fromp & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * On the first access of a page, use subyte to make sure
		 * the page is faulted in and write access is allowed.
		 */
		if (subyte(top, *fromp) == -1)
			return (EFAULT);
		/*
		 * Map in the page and bcopy the data out to it.
		 */
		upa = pmap_extract(upmap, trunc_page(top));
		if (upa == 0)
			panic("mappedcopyout");
		len = min(count, PAGE_SIZE-off);
		pmap_enter(kernel_pmap, kva, upa,
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(fromp, kva);
		else
			bcopy(fromp, (caddr_t)(kva+off), len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
	return (0);
}
#endif
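
/*
 * Dispatch sketch: mappedcopysize is assumed to be consulted by the
 * copyin/copyout paths in locore.s, roughly
 *
 *	if (count >= mappedcopysize)
 *		use mappedcopyin/mappedcopyout
 *	else
 *		use the normal short-copy loop
 *
 * so the page-mapping overhead above is paid only on large transfers.
 */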