1a06587f5Smckusick /*
2a06587f5Smckusick * Copyright (c) 1988 University of Utah.
33195adacSbostic * Copyright (c) 1992, 1993
43195adacSbostic * The Regents of the University of California. All rights reserved.
5a06587f5Smckusick *
6a06587f5Smckusick * This code is derived from software contributed to Berkeley by
7a06587f5Smckusick * the Systems Programming Group of the University of Utah Computer
8a06587f5Smckusick * Science Department and Ralph Campbell.
9a06587f5Smckusick *
10a06587f5Smckusick * %sccs.include.redist.c%
11a06587f5Smckusick *
12a06587f5Smckusick * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
13a06587f5Smckusick *
14*e9a66bd9Smckusick * @(#)vm_machdep.c 8.3 (Berkeley) 01/04/94
15a06587f5Smckusick */
16a06587f5Smckusick
17327b2279Sbostic #include <sys/param.h>
18327b2279Sbostic #include <sys/systm.h>
19327b2279Sbostic #include <sys/proc.h>
20327b2279Sbostic #include <sys/malloc.h>
21327b2279Sbostic #include <sys/buf.h>
22327b2279Sbostic #include <sys/vnode.h>
23327b2279Sbostic #include <sys/user.h>
24a06587f5Smckusick
25327b2279Sbostic #include <vm/vm.h>
26327b2279Sbostic #include <vm/vm_kern.h>
27327b2279Sbostic #include <vm/vm_page.h>
28a06587f5Smckusick
29327b2279Sbostic #include <machine/pte.h>
30a06587f5Smckusick
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	register pt_entry_t *pte;
	register int i;
	extern struct proc *machFPCurProcPtr;	/* proc whose FP state is live in the chip */

	/* Point the child's MD register save area at its own pcb. */
	p2->p_md.md_regs = up->u_pcb.pcb_regs;
	/* Child inherits only the FP-used and Ultrix-emulation flags. */
	p2->p_md.md_flags = p1->p_md.md_flags & (MDP_FPUSED | MDP_ULTRIX);

	/*
	 * Cache the PTEs for the user area in the machine dependent
	 * part of the proc struct so cpu_switch() can quickly map in
	 * the user struct and kernel stack.  Note: if the virtual address
	 * translation changes (e.g. swapout) we have to update this.
	 */
	pte = kvtopte(up);
	for (i = 0; i < UPAGES; i++) {
		/* Clear PG_G so the u-area mapping is per-process in the TLB. */
		p2->p_md.md_upte[i] = pte->pt_entry & ~PG_G;
		pte++;
	}

	/*
	 * Copy floating point state from the FP chip if this process
	 * has state stored there.  Must happen before the pcb copy below
	 * so the child gets up-to-date FP registers.
	 */
	if (p1 == machFPCurProcPtr)
		MachSaveCurFPState(p1);

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	/* cache segtab for ULTBMiss() */
	p2->p_addr->u_pcb.pcb_segtab = (void *)p2->p_vmspace->vm_pmap.pm_segtab;

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
#ifdef DIAGNOSTIC
	if (p1 != curproc)
		panic("cpu_fork: curproc");
#endif
	if (copykstack(up)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}
95a06587f5Smckusick
/*
 * Finish a swapin operation.
 * We need to update the cached PTEs for the user area in the
 * machine dependent part of the proc structure.
 */
101*e9a66bd9Smckusick void
cpu_swapin(p)102*e9a66bd9Smckusick cpu_swapin(p)
103*e9a66bd9Smckusick register struct proc *p;
104*e9a66bd9Smckusick {
105*e9a66bd9Smckusick register struct user *up = p->p_addr;
106*e9a66bd9Smckusick register pt_entry_t *pte;
107*e9a66bd9Smckusick register int i;
108*e9a66bd9Smckusick
109*e9a66bd9Smckusick /*
110*e9a66bd9Smckusick * Cache the PTEs for the user area in the machine dependent
111*e9a66bd9Smckusick * part of the proc struct so cpu_switch() can quickly map in
112*e9a66bd9Smckusick * the user struct and kernel stack.
113*e9a66bd9Smckusick */
114*e9a66bd9Smckusick pte = kvtopte(up);
115*e9a66bd9Smckusick for (i = 0; i < UPAGES; i++) {
116*e9a66bd9Smckusick p->p_md.md_upte[i] = pte->pt_entry & ~PG_G;
117*e9a66bd9Smckusick pte++;
118*e9a66bd9Smckusick }
119*e9a66bd9Smckusick }
120*e9a66bd9Smckusick
/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call switch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until switch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{
	extern struct proc *machFPCurProcPtr;	/* proc whose FP state is live in the chip */

	/* The dying process must not be left as the FP-chip owner. */
	if (machFPCurProcPtr == p)
		machFPCurProcPtr = (struct proc *)0;

	vmspace_free(p->p_vmspace);

	/*
	 * Block all interrupts: we are about to free the kernel stack
	 * we are currently running on, so nothing may preempt us until
	 * switch_exit() has moved onto a safe temporary stack.
	 */
	(void) splhigh();
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	switch_exit();
	/* NOTREACHED */
}
144a06587f5Smckusick
145a06587f5Smckusick /*
146185136f6Storek * Dump the machine specific header information at the start of a core dump.
147185136f6Storek */
148185136f6Storek cpu_coredump(p, vp, cred)
149185136f6Storek struct proc *p;
150185136f6Storek struct vnode *vp;
151185136f6Storek struct ucred *cred;
152185136f6Storek {
153425cee62Sralph extern struct proc *machFPCurProcPtr;
154425cee62Sralph
155425cee62Sralph /*
156425cee62Sralph * Copy floating point state from the FP chip if this process
157425cee62Sralph * has state stored there.
158425cee62Sralph */
159425cee62Sralph if (p == machFPCurProcPtr)
160425cee62Sralph MachSaveCurFPState(p);
161185136f6Storek
162185136f6Storek return (vn_rdwr(UIO_WRITE, vp, (caddr_t)p->p_addr, ctob(UPAGES),
163185136f6Storek (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
164185136f6Storek p));
165185136f6Storek }
166185136f6Storek
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register pt_entry_t *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		/*
		 * Drop the stale translation for the source address and
		 * install the page under the destination address before
		 * updating the page tables.
		 */
		MachTLBFlushAddr(from);
		MachTLBUpdate(to, *fpte);
		*tpte++ = *fpte;
		fpte->pt_entry = 0;	/* invalidate the source PTE */
		fpte++;
		size -= NBPG;
		from += NBPG;
		to += NBPG;
	}
}
193a06587f5Smckusick
194a06587f5Smckusick extern vm_map_t phys_map;
195a06587f5Smckusick
/*
 * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five catagories:
 *
 *	B_PHYS|B_UAREA:	User u-area swap.
 *			Address is relative to start of u-area (p_addr).
 *	B_PHYS|B_PAGET:	User page table swap.
 *			Address is a kernel VA in usrpt (Usrptmap).
 *	B_PHYS|B_DIRTY:	Dirty page push.
 *			Address is a VA in proc2's address space.
 *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
 *			Address is VA in user's address space.
 *	B_PHYS:		User "raw" IO request.
 *			Address is VA in user's address space.
 *
 * All requests are (re)mapped into kernel VA space via the phys_map
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	register vm_size_t sz;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	/* Remember the user VA so vunmapbuf() can restore it. */
	addr = bp->b_saveaddr = bp->b_un.b_addr;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	sz = round_page(bp->b_count + off);
	kva = kmem_alloc_wait(phys_map, sz);
	/* Preserve the original page offset within the new kernel mapping. */
	bp->b_un.b_addr = (caddr_t) (kva + off);
	sz = atop(sz);
	while (sz--) {
		/* Look up the physical page backing the user VA ... */
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
			(vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		/* ... and wire it into the kernel's phys_map window. */
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
			VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}
243a06587f5Smckusick
244a06587f5Smckusick /*
245a06587f5Smckusick * Free the io map PTEs associated with this IO operation.
246a06587f5Smckusick * We also invalidate the TLB entries and restore the original b_addr.
247a06587f5Smckusick */
vunmapbuf(bp)248a06587f5Smckusick vunmapbuf(bp)
249a06587f5Smckusick register struct buf *bp;
250a06587f5Smckusick {
251a06587f5Smckusick register caddr_t addr = bp->b_un.b_addr;
252a06587f5Smckusick register vm_size_t sz;
253a06587f5Smckusick vm_offset_t kva;
254a06587f5Smckusick
255a06587f5Smckusick if ((bp->b_flags & B_PHYS) == 0)
256a06587f5Smckusick panic("vunmapbuf");
257a06587f5Smckusick sz = round_page(bp->b_bcount + ((int)addr & PGOFSET));
258a06587f5Smckusick kva = (vm_offset_t)((int)addr & ~PGOFSET);
259a06587f5Smckusick kmem_free_wakeup(phys_map, kva, sz);
260a06587f5Smckusick bp->b_un.b_addr = bp->b_saveaddr;
261a06587f5Smckusick bp->b_saveaddr = NULL;
262a06587f5Smckusick }
263