xref: /original-bsd/sys/pmax/pmax/vm_machdep.c (revision 333da485)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1992, 1993
4  *	The Regents of the University of California.  All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department and Ralph Campbell.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
13  *
14  *	@(#)vm_machdep.c	8.3 (Berkeley) 01/04/94
15  */
16 
17 #include <sys/param.h>
18 #include <sys/systm.h>
19 #include <sys/proc.h>
20 #include <sys/malloc.h>
21 #include <sys/buf.h>
22 #include <sys/vnode.h>
23 #include <sys/user.h>
24 
25 #include <vm/vm.h>
26 #include <vm/vm_kern.h>
27 #include <vm/vm_page.h>
28 
29 #include <machine/pte.h>
30 
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	register pt_entry_t *pte;
	register int i;
	extern struct proc *machFPCurProcPtr;	/* proc whose FP state is live in the chip */

	/* Point the child's MD register save area at its new pcb. */
	p2->p_md.md_regs = up->u_pcb.pcb_regs;
	/* The child inherits only the FP-in-use and Ultrix-emulation flags. */
	p2->p_md.md_flags = p1->p_md.md_flags & (MDP_FPUSED | MDP_ULTRIX);

	/*
	 * Cache the PTEs for the user area in the machine dependent
	 * part of the proc struct so cpu_switch() can quickly map in
	 * the user struct and kernel stack. Note: if the virtual address
	 * translation changes (e.g. swapout) we have to update this.
	 * PG_G is cleared so the cached entries are not global —
	 * presumably so they stay private to this process's TLB
	 * context; TODO confirm against the TLB refill code.
	 */
	pte = kvtopte(up);
	for (i = 0; i < UPAGES; i++) {
		p2->p_md.md_upte[i] = pte->pt_entry & ~PG_G;
		pte++;
	}

	/*
	 * Copy floating point state from the FP chip if this process
	 * has state stored there.  This must precede the pcb copy
	 * below so the child gets up-to-date FP state.
	 */
	if (p1 == machFPCurProcPtr)
		MachSaveCurFPState(p1);

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	/* cache segtab for ULTBMiss() */
	p2->p_addr->u_pcb.pcb_segtab = (void *)p2->p_vmspace->vm_pmap.pm_segtab;

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
#ifdef DIAGNOSTIC
	if (p1 != curproc)
		panic("cpu_fork: curproc");
#endif
	if (copykstack(up)) {
		/*
		 * Return 1 in child.  copykstack() copied the parent's
		 * kernel stack into the child's u-area and set things up
		 * setjmp-style: when the child is first switched to, it
		 * resumes here with a nonzero return.
		 */
		return (1);
	}
	/* Return 0 in the parent. */
	return (0);
}
95 
96 /*
97  * Finish a swapin operation.
98  * We neded to update the cached PTEs for the user area in the
99  * machine dependent part of the proc structure.
100  */
101 void
102 cpu_swapin(p)
103 	register struct proc *p;
104 {
105 	register struct user *up = p->p_addr;
106 	register pt_entry_t *pte;
107 	register int i;
108 
109 	/*
110 	 * Cache the PTEs for the user area in the machine dependent
111 	 * part of the proc struct so cpu_switch() can quickly map in
112 	 * the user struct and kernel stack.
113 	 */
114 	pte = kvtopte(up);
115 	for (i = 0; i < UPAGES; i++) {
116 		p->p_md.md_upte[i] = pte->pt_entry & ~PG_G;
117 		pte++;
118 	}
119 }
120 
/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call switch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until switch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{
	extern struct proc *machFPCurProcPtr;	/* proc whose FP state is live in the chip */

	/* If the dying process owns the FP chip, just forget its state. */
	if (machFPCurProcPtr == p)
		machFPCurProcPtr = (struct proc *)0;

	vmspace_free(p->p_vmspace);

	/*
	 * Block interrupts before freeing the u-area: it contains the
	 * kernel stack we are still running on, so nothing may preempt
	 * us between the free and the switch to switch_exit()'s
	 * temporary stack.
	 */
	(void) splhigh();
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	switch_exit();
	/* NOTREACHED */
}
144 
145 /*
146  * Dump the machine specific header information at the start of a core dump.
147  */
148 cpu_coredump(p, vp, cred)
149 	struct proc *p;
150 	struct vnode *vp;
151 	struct ucred *cred;
152 {
153 	extern struct proc *machFPCurProcPtr;
154 
155 	/*
156 	 * Copy floating point state from the FP chip if this process
157 	 * has state stored there.
158 	 */
159 	if (p == machFPCurProcPtr)
160 		MachSaveCurFPState(p);
161 
162 	return (vn_rdwr(UIO_WRITE, vp, (caddr_t)p->p_addr, ctob(UPAGES),
163 	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *)NULL,
164 	    p));
165 }
166 
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register pt_entry_t *fpte, *tpte;

	/* Callers must move whole clusters. */
	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);	/* source PTEs in the Sysmap */
	tpte = kvtopte(to);	/* destination PTEs in the Sysmap */
	while (size > 0) {
		/* Drop any stale TLB entry for the old address... */
		MachTLBFlushAddr(from);
		/* ...and install the mapping at the new address. */
		MachTLBUpdate(to, *fpte);
		*tpte++ = *fpte;	/* move the PTE itself */
		fpte->pt_entry = 0;	/* invalidate the source mapping */
		fpte++;
		size -= NBPG;
		from += NBPG;
		to += NBPG;
	}
}
193 
194 extern vm_map_t phys_map;
195 
196 /*
197  * Map an IO request into kernel virtual address space.  Requests fall into
198  * one of five catagories:
199  *
200  *	B_PHYS|B_UAREA:	User u-area swap.
201  *			Address is relative to start of u-area (p_addr).
202  *	B_PHYS|B_PAGET:	User page table swap.
203  *			Address is a kernel VA in usrpt (Usrptmap).
204  *	B_PHYS|B_DIRTY:	Dirty page push.
205  *			Address is a VA in proc2's address space.
206  *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
207  *			Address is VA in user's address space.
208  *	B_PHYS:		User "raw" IO request.
209  *			Address is VA in user's address space.
210  *
211  * All requests are (re)mapped into kernel VA space via the phys_map
212  */
213 vmapbuf(bp)
214 	register struct buf *bp;
215 {
216 	register caddr_t addr;
217 	register vm_size_t sz;
218 	struct proc *p;
219 	int off;
220 	vm_offset_t kva;
221 	register vm_offset_t pa;
222 
223 	if ((bp->b_flags & B_PHYS) == 0)
224 		panic("vmapbuf");
225 	addr = bp->b_saveaddr = bp->b_un.b_addr;
226 	off = (int)addr & PGOFSET;
227 	p = bp->b_proc;
228 	sz = round_page(bp->b_bcount + off);
229 	kva = kmem_alloc_wait(phys_map, sz);
230 	bp->b_un.b_addr = (caddr_t) (kva + off);
231 	sz = atop(sz);
232 	while (sz--) {
233 		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
234 			(vm_offset_t)addr);
235 		if (pa == 0)
236 			panic("vmapbuf: null page frame");
237 		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
238 			VM_PROT_READ|VM_PROT_WRITE, TRUE);
239 		addr += PAGE_SIZE;
240 		kva += PAGE_SIZE;
241 	}
242 }
243 
244 /*
245  * Free the io map PTEs associated with this IO operation.
246  * We also invalidate the TLB entries and restore the original b_addr.
247  */
248 vunmapbuf(bp)
249 	register struct buf *bp;
250 {
251 	register caddr_t addr = bp->b_un.b_addr;
252 	register vm_size_t sz;
253 	vm_offset_t kva;
254 
255 	if ((bp->b_flags & B_PHYS) == 0)
256 		panic("vunmapbuf");
257 	sz = round_page(bp->b_bcount + ((int)addr & PGOFSET));
258 	kva = (vm_offset_t)((int)addr & ~PGOFSET);
259 	kmem_free_wakeup(phys_map, kva, sz);
260 	bp->b_un.b_addr = bp->b_saveaddr;
261 	bp->b_saveaddr = NULL;
262 }
263