/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_machdep.c	8.1 (Berkeley) 06/11/93
 *
 * from: $Header: vm_machdep.c,v 1.10 92/11/26 03:05:11 torek Exp $ (LBL)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/exec.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>

#include <machine/cpu.h>
#include <machine/frame.h>

/*
 * Move pages from one kernel virtual address to another.
 *
 * Both addresses and the byte count must be click-aligned (no bits in
 * CLOFSET set); anything else is a caller bug and we panic.  For each
 * page we look up the physical page behind `from', tear down the old
 * mapping, and re-enter the same physical page at `to' (wired, R/W).
 * Only the mappings move; no data is copied.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register vm_offset_t pa;

	if (size & CLOFSET || (int)from & CLOFSET || (int)to & CLOFSET)
		panic("pagemove 1");
	while (size > 0) {
		/* Find the physical page currently backing `from'. */
		pa = pmap_extract(kernel_pmap, (vm_offset_t)from);
		if (pa == 0)
			panic("pagemove 2");
		/* Remove the old mapping before entering the new one. */
		pmap_remove(kernel_pmap,
		    (vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
		pmap_enter(kernel_pmap,
		    (vm_offset_t)to, pa, VM_PROT_READ|VM_PROT_WRITE, 1);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Map an IO request into kernel virtual address space.
 *
 * ### pmap_enter distributes this mapping to all contexts ...
maybe
 * we should avoid this extra work
 *
 * THIS IS NOT IDEAL -- WE NEED ONLY VIRTUAL SPACE BUT kmem_alloc_wait
 * DOES WORK DESIGNED TO SUPPLY PHYSICAL SPACE ON DEMAND LATER
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	/* Only physical-I/O buffers may be mapped this way. */
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	/* Save the user address so vunmapbuf can restore it later. */
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	/* Pages needed to cover the buffer, including the page offset. */
	npf = btoc(round_page(bp->b_bcount + off));
	/* May sleep until phys_map virtual space becomes available. */
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_un.b_addr = (caddr_t) (kva + off);
	while (npf--) {
		/* Translate each user page through the owner's pmap. */
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		/* Enter the page uncached (PMAP_NC) and wired. */
		pmap_enter(vm_map_pmap(phys_map), kva,
		    trunc_page(pa) | PMAP_NC,
		    VM_PROT_READ|VM_PROT_WRITE, 1);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map addresses associated with this IO operation.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register vm_offset_t kva = (vm_offset_t)bp->b_un.b_addr;
	register int off, npf;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	/* Recover the page-aligned base and size vmapbuf allocated. */
	off = (int)kva & PGOFSET;
	kva -= off;
	npf = btoc(round_page(bp->b_bcount + off));
	/* Release the phys_map space and wake any waiters in vmapbuf. */
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	/* Put back the original user address saved by vmapbuf. */
	bp->b_un.b_addr = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
	/* Flush only the bytes actually transferred. */
	cache_flush(bp->b_un.b_addr, bp->b_bcount - bp->b_resid);
}

/*
 * Allocate physical memory space in the dvma virtual address range.
 */
caddr_t
dvma_malloc(size)
	size_t size;
{
	vm_size_t vsize;
	caddr_t va;

	vsize = round_page(size);
	va = (caddr_t)kmem_alloc(phys_map, vsize);
	if (va == NULL)
		panic("dvma_malloc");
	/* DVMA space must not be cached. */
	kvm_uncache(va, vsize >> PGSHIFT);
	return (va);
}

/*
 * The offset of the topmost frame in the kernel stack.
 */
#define	TOPFRAMEOFF (UPAGES*NBPG-sizeof(struct trapframe)-sizeof(struct frame))

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 *
 * This function relies on the fact that the pcb is
 * the first element in struct user.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct pcb *opcb = &p1->p_addr->u_pcb;
	register struct pcb *npcb = &p2->p_addr->u_pcb;
	register u_int sp, topframe, off, ssize;

	/*
	 * Save all the registers to p1's stack or, in the case of
	 * user registers and invalid stack pointers, to opcb.
	 * snapshot() also sets the given pcb's pcb_sp and pcb_psr
	 * to the current %sp and %psr, and sets pcb_pc to a stub
	 * which returns 1.  We then copy the whole pcb to p2;
	 * when swtch() selects p2 to run, it will run at the stub,
	 * rather than at the copying code below, and cpu_fork
	 * will return 1.
	 *
	 * Note that the order `*npcb = *opcb, snapshot(npcb)' is wrong,
	 * as user registers might then wind up only in opcb.
	 * We could call save_user_windows first,
	 * but that would only save 3 stores anyway.
	 *
	 * If process p1 has an FPU state, we must copy it.  If it is
	 * the FPU user, we must save the FPU state first.
	 */
	snapshot(opcb);
	bcopy((caddr_t)opcb, (caddr_t)npcb, sizeof(struct pcb));
	if (p1->p_md.md_fpstate) {
		/* Flush live FPU registers into p1's state before copying. */
		if (p1 == fpproc)
			savefpstate(p1->p_md.md_fpstate);
		p2->p_md.md_fpstate = malloc(sizeof(struct fpstate),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_md.md_fpstate, p2->p_md.md_fpstate,
		    sizeof(struct fpstate));
	} else
		p2->p_md.md_fpstate = NULL;

	/*
	 * Copy the active part of the kernel stack,
	 * then adjust each kernel sp -- the frame pointer
	 * in the top frame is a user sp -- in the child's copy,
	 * including the initial one in the child's pcb.
	 */
	sp = npcb->pcb_sp;		/* points to old kernel stack */
	ssize = (u_int)opcb + UPAGES * NBPG - sp;
	if (ssize >= UPAGES * NBPG - sizeof(struct pcb))
		panic("cpu_fork 1");
	/* `off' relocates parent-stack addresses into the child's copy. */
	off = (u_int)npcb - (u_int)opcb;
	qcopy((caddr_t)sp, (caddr_t)sp + off, ssize);
	sp += off;
	npcb->pcb_sp = sp;
	topframe = (u_int)npcb + TOPFRAMEOFF;
	/* Walk the saved register-window chain, relocating each %fp/%i6. */
	while (sp < topframe)
		sp = ((struct rwindow *)sp)->rw_in[6] += off;
	if (sp != topframe)
		panic("cpu_fork 2");
	/*
	 * This might be unnecessary, but it may be possible for the child
	 * to run in ptrace or sendsig before it returns from fork.
	 */
	p2->p_md.md_tf = (struct trapframe *)((int)p1->p_md.md_tf + off);
	return (0);
}

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Since the latter is also the interrupt stack, we release it
 * from assembly code after switching to a temporary pcb+stack.
 */
cpu_exit(p)
	struct proc *p;
{
	register struct fpstate *fs;

	/* Tear down any FPU state before freeing it. */
	if ((fs = p->p_md.md_fpstate) != NULL) {
		if (p == fpproc) {
			/* Flush live registers, then release FPU ownership. */
			savefpstate(fs);
			fpproc = NULL;
		}
		free((void *)fs, M_SUBPROC);
	}
	vmspace_free(p->p_vmspace);
	/*
	 * Switch away on a temporary stack and free the u. area
	 * (kernel stack) from there; this call never returns.
	 */
	swtchexit(kernel_map, p->p_addr, round_page(ctob(UPAGES)));
	/* NOTREACHED */
}

/*
 * cpu_coredump is called to write a core dump header.
 * (should this be defined elsewhere? machdep.c?)
 */
int
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
	register struct user *up = p->p_addr;

	/* Snapshot the trap frame and FPU state into the u. area ... */
	up->u_md.md_tf = *p->p_md.md_tf;
	if (p->p_md.md_fpstate)
		up->u_md.md_fpstate = *p->p_md.md_fpstate;
	else
		bzero((caddr_t)&up->u_md.md_fpstate, sizeof(struct fpstate));
	/* ... then write the whole u. area as the core header. */
	return (vn_rdwr(UIO_WRITE, vp, (caddr_t)up, ctob(UPAGES), (off_t)0,
	    UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL, p));
}