/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	7.5 (Berkeley) 03/15/92
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "malloc.h"
#include "buf.h"
#include "vnode.h"
#include "user.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

#include "../include/pte.h"

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	register pt_entry_t *pte;
	register int i;
	extern struct proc *machFPCurProcPtr;

	p2->p_md.md_regs = up->u_pcb.pcb_regs;
	p2->p_md.md_flags = p1->p_md.md_flags & (MDP_FPUSED | MDP_ULTRIX);

	/*
	 * Convert the user struct virtual address to a physical one
	 * and cache it in the proc struct.  Note: if the physical address
	 * can change (due to memory compaction in kmem_alloc?),
	 * we will have to update things.
	 */
	pte = kvtopte(up);
	for (i = 0; i < UPAGES; i++) {
		p2->p_md.md_upte[i] = pte->pt_entry & ~PG_G;
		pte++;
	}

	/*
	 * Copy floating point state from the FP chip if this process
	 * has state stored there.
	 */
	if (p1 == machFPCurProcPtr)
		MachSaveCurFPState(p1);

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
#ifdef DIAGNOSTIC
	if (p1 != curproc)
		panic("cpu_fork: curproc");
#endif
	if (copykstack(up)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call swtch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until swtch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{
	extern struct proc *machFPCurProcPtr;

	if (machFPCurProcPtr == p)
		machFPCurProcPtr = (struct proc *)0;

	vmspace_free(p->p_vmspace);

	(void) splhigh();
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	swtch_exit();
	/* NOTREACHED */
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
	extern struct proc *machFPCurProcPtr;

	/*
	 * Copy floating point state from the FP chip if this process
	 * has state stored there.
	 */
	if (p == machFPCurProcPtr)
		MachSaveCurFPState(p);

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t)p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
	    p));
}
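/*
 * A sketch of the expected calling pattern for pagemove() below, assuming
 * the usual 4.3BSD-style buffer cache (the names tp, bp and the offset are
 * illustrative, not taken from this file): allocbuf() grows one buffer by
 * stealing the mapped pages of another, e.g.
 *
 *	pagemove(tp->b_un.b_addr, bp->b_un.b_addr + oldsize, CLBYTES);
 *
 * No data is copied; only the Sysmap PTEs and the TLB are updated.
 */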
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register pt_entry_t *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		MachTLBFlushAddr(from);		/* drop the old translation */
		MachTLBUpdate(to, *fpte);	/* refresh cached entry for new VA */
		*tpte++ = *fpte;
		fpte->pt_entry = 0;
		fpte++;
		size -= NBPG;
		from += NBPG;
		to += NBPG;
	}
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five categories:
 *
 *	B_PHYS|B_UAREA:	User u-area swap.
 *			Address is relative to start of u-area (p_addr).
 *	B_PHYS|B_PAGET:	User page table swap.
 *			Address is a kernel VA in usrpt (Usrptmap).
 *	B_PHYS|B_DIRTY:	Dirty page push.
 *			Address is a VA in proc2's address space.
 *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
 *			Address is VA in user's address space.
 *	B_PHYS:		User "raw" IO request.
 *			Address is VA in user's address space.
 *
 * All requests are (re)mapped into kernel VA space via the phys_map.
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	register vm_size_t sz;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_un.b_addr; /* save user VA for vunmapbuf */
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	sz = round_page(bp->b_bcount + off);
	kva = kmem_alloc_wait(phys_map, sz);
	bp->b_un.b_addr = (caddr_t)(kva + off);
	sz = atop(sz);
	while (sz--) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 * We also invalidate the TLB entries and restore the original b_addr.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr = bp->b_un.b_addr;
	register vm_size_t sz;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	sz = round_page(bp->b_bcount + ((int)addr & PGOFSET));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, sz);
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
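/*
 * A sketch (illustrative, not part of this file) of how vmapbuf() and
 * vunmapbuf() are expected to pair up around a raw transfer, assuming a
 * physio()-style caller:
 *
 *	vmapbuf(bp);		-- double-map the user pages into phys_map
 *	(*strategy)(bp);	-- start the device transfer on the kernel VA
 *	biowait(bp);		-- sleep until the transfer completes
 *	vunmapbuf(bp);		-- release the mapping, restore b_un.b_addr
 */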