/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	8.5 (Berkeley) 01/04/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;
	extern caddr_t getsp();
	extern char kstack[];

	p2->p_md.md_regs = p1->p_md.md_regs;
	p2->p_md.md_flags = (p1->p_md.md_flags & ~(MDP_AST|MDP_HPUXTRACE));

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * switch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = getsp() - kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);
	PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &up->u_pcb, 0);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call switch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until switch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{

	vmspace_free(p->p_vmspace);

	(void) splimp();
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	switch_exit();
	/* NOTREACHED */
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{

#ifdef HPUXCOMPAT
	/*
	 * If we loaded from an HP-UX format binary file we dump enough
	 * of an HP-UX style user struct so that the HP-UX debuggers can
	 * grok it.
	 */
	if (p->p_md.md_flags & MDP_HPUX)
		return (hpuxdumpu(vp, cred));
#endif
	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred,
	    (int *) NULL, p));
}
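/*
 * Illustrative sketch of the partial stack copy done by cpu_fork()
 * above; the constants are assumptions chosen only for the example and
 * the fragment is never compiled.  The kernel stack grows downward
 * within the ctob(UPAGES)-byte u. area, so only the bytes between the
 * current stack pointer and the top of the area are live.
 */
#if 0
	/* assume NBPG == 4096 and UPAGES == 3, so ctob(UPAGES) == 12288 */
	offset = getsp() - kstack;		/* say the sp sits 10000 bytes above kstack */
	bcopy((caddr_t)kstack + offset,		/* copy only the live top of the stack */
	    (caddr_t)p2->p_addr + offset,	/* to the same offset in the child's u. area */
	    (unsigned) ctob(UPAGES) - offset);	/* 12288 - 10000 = 2288 bytes */
#endif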
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = PG_NV;
		TBIS(from);
		TBIS(to);
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
	DCIS();
}

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	register int size, prot;
{
	register struct pte *pte;
	register u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*(int *)pte++ = PG_V | prot | page;
		page += NBPG;
	}
	TBIAS();
}

physunaccess(vaddr, size)
	caddr_t vaddr;
	register int size;
{
	register struct pte *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*(int *)pte++ = PG_NV;
	TBIAS();
}

/*
 * Set a red zone in the kernel stack after the u. area.
 * We don't support a redzone right now.  It really isn't clear
 * that it is a good idea since, if the kernel stack were to roll
 * into a write protected page, the processor would lock up (since
 * it cannot create an exception frame) and we would get no useful
 * post-mortem info.  Currently, under the DEBUG option, we just
 * check at every clock interrupt to see if the current k-stack has
 * gone too far (i.e. into the "redzone" page) and if so, panic.
 * Look at _lev6intr in locore.s for more details.
 */
/*ARGSUSED*/
setredzone(pte, vaddr)
	struct pte *pte;
	caddr_t vaddr;
{
}

/*
 * Convert kernel VA to physical address
 */
kvtop(addr)
	register caddr_t addr;
{
	vm_offset_t va;

	va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.
 *
 * XXX we allocate KVA space by using kmem_alloc_wait which we know
 * allocates space without backing physical memory.  This implementation
 * is a total crock, the multiple mappings of these physical pages should
 * be reflected in the higher-level VM structures to avoid problems.
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_data = (caddr_t)(kva + off);
	while (npf--) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}
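/*
 * Illustrative note on the page arithmetic used by vmapbuf() above and
 * vunmapbuf() below; the page size is an assumption for the example.
 * A transfer that starts `off' bytes into a page and is b_bcount bytes
 * long touches btoc(round_page(b_bcount + off)) pages.  With 4096-byte
 * pages, a 6000-byte transfer starting at page offset 512 gives
 * round_page(6512) == 8192, i.e. two pages of phys_map KVA to allocate
 * and map.
 */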
/*
 * Free the io map PTEs associated with this IO operation.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	register int npf;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = bp->b_data;
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

#ifdef MAPPEDCOPY
u_int mappedcopysize = 4096;

mappedcopyin(fromp, top, count)
	register char *fromp, *top;
	register int count;
{
	register vm_offset_t kva, upa;
	register int off, len;
	int alignable;
	pmap_t upmap;
	extern caddr_t CADDR1;

	kva = (vm_offset_t) CADDR1;
	off = (vm_offset_t)fromp & PAGE_MASK;
	alignable = (off == ((vm_offset_t)top & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use fubyte to make sure
		 * page is faulted in and read access allowed.
		 */
		if (fubyte(fromp) == -1)
			return (EFAULT);
		/*
		 * Map in the page and bcopy data in from it
		 */
		upa = pmap_extract(upmap, trunc_page(fromp));
		if (upa == 0)
			panic("mappedcopyin");
		len = min(count, PAGE_SIZE-off);
		pmap_enter(kernel_pmap, kva, upa, VM_PROT_READ, TRUE);
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(kva, top);
		else
			bcopy((caddr_t)(kva+off), top, len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
	return (0);
}

mappedcopyout(fromp, top, count)
	register char *fromp, *top;
	register int count;
{
	register vm_offset_t kva, upa;
	register int off, len;
	int alignable;
	pmap_t upmap;
	extern caddr_t CADDR2;

	kva = (vm_offset_t) CADDR2;
	off = (vm_offset_t)top & PAGE_MASK;
	alignable = (off == ((vm_offset_t)fromp & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use subyte to make sure
		 * page is faulted in and write access allowed.
		 */
		if (subyte(top, *fromp) == -1)
			return (EFAULT);
		/*
		 * Map in the page and bcopy data out to it
		 */
		upa = pmap_extract(upmap, trunc_page(top));
		if (upa == 0)
			panic("mappedcopyout");
		len = min(count, PAGE_SIZE-off);
		pmap_enter(kernel_pmap, kva, upa,
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(fromp, kva);
		else
			bcopy(fromp, (caddr_t)(kva+off), len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
	return (0);
}
#endif
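/*
 * Illustrative note on the `alignable' test in mappedcopyin() and
 * mappedcopyout() above; the page size and sizes are assumptions for
 * the example.  copypage() is used only when a full page can be moved
 * with both sides page aligned, i.e. when the source and destination
 * share the same offset within their pages (alignable), the remaining
 * count covers a whole page, and the running offset has reached 0.
 * For example, a 12000-byte copy whose source and destination both
 * start at offset 0x200 of a 4096-byte page does a 3584-byte bcopy
 * first, then two full-page copypage() calls, and a final 224-byte
 * bcopy for the tail.
 */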