/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1992 OMRON Corporation.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 * from: hp300/hp300/vm_machdep.c	8.4 (Berkeley) 11/14/93
 *
 *	@(#)vm_machdep.c	8.3 (Berkeley) 12/06/93
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <luna68k/luna68k/pte.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;
	extern caddr_t getsp();
	extern char kstack[];

	p2->p_md.md_regs = p1->p_md.md_regs;
	p2->p_md.md_flags = (p1->p_md.md_flags & ~(MDP_AST|MDP_HPUXTRACE));

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * switch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = getsp() - kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);

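	/*
	 * Set up the child's memory-map context in its pcb; the final
	 * argument of 0 indicates that p2 is not the currently running
	 * process.
	 */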
	PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &up->u_pcb, 0);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call switch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until switch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{

	vmspace_free(p->p_vmspace);

	(void) splimp();
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	switch_exit();
	/* NOTREACHED */
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
	int error;

	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) NULL,
	    p));
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register struct pte *fpte, *tpte;

	if (size % CLBYTES)
		panic("pagemove");
	fpte = kvtopte(from);
	tpte = kvtopte(to);
	while (size > 0) {
		*tpte++ = *fpte;
		*(int *)fpte++ = PG_NV;
		TBIS(from);
		TBIS(to);
		from += NBPG;
		to += NBPG;
		size -= NBPG;
	}
#ifdef LUNA2
	DCIS();
#endif
}

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	register int size, prot;
{
	register struct pte *pte;
	register u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*(int *)pte++ = PG_V | prot | page;
		page += NBPG;
	}
	TBIAS();
}

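/*
 * Undo a physaccess() mapping: invalidate the PTEs covering the
 * range and flush the translation buffer.
 */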
physunaccess(vaddr, size)
	caddr_t vaddr;
	register int size;
{
	register struct pte *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*(int *)pte++ = PG_NV;
	TBIAS();
}

/*
 * Set a red zone in the kernel stack after the u. area.
 * We don't support a redzone right now.  It really isn't clear
 * that it is a good idea since, if the kernel stack were to roll
 * into a write protected page, the processor would lock up (since
 * it cannot create an exception frame) and we would get no useful
 * post-mortem info.  Currently, under the DEBUG option, we just
 * check at every clock interrupt to see if the current k-stack has
 * gone too far (i.e. into the "redzone" page) and if so, panic.
 * Look at _lev6intr in locore.s for more details.
 */
/*ARGSUSED*/
setredzone(pte, vaddr)
	struct pte *pte;
	caddr_t vaddr;
{
}

/*
 * Convert kernel VA to physical address
 */
kvtop(addr)
	register caddr_t addr;
{
	vm_offset_t va;

	va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.
 *
 * XXX we allocate KVA space by using kmem_alloc_wait which we know
 * allocates space without backing physical memory.  This implementation
 * is a total crock; the multiple mappings of these physical pages should
 * be reflected in the higher-level VM structures to avoid problems.
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_data = (caddr_t) (kva + off);
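	/*
	 * Double-map each physical page of the user buffer into the
	 * newly allocated kernel VA so the transfer can proceed on
	 * kernel addresses.
	 */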
	while (npf--) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	register int npf;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = bp->b_data;
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

#ifdef MAPPEDCOPY
u_int mappedcopysize = 4096;

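/*
 * Copy data from user space to kernel space by temporarily mapping
 * each user page at CADDR1 and copying from that mapping.  fubyte()
 * is used to fault the page in and verify read access before it is
 * mapped.  Intended as a faster alternative to copyin() for large
 * transfers (presumably those of at least mappedcopysize bytes).
 */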
mappedcopyin(fromp, top, count)
	register char *fromp, *top;
	register int count;
{
	register vm_offset_t kva, upa;
	register int off, len;
	int alignable;
	pmap_t upmap;
	extern caddr_t CADDR1;

	kva = (vm_offset_t) CADDR1;
	off = (vm_offset_t)fromp & PAGE_MASK;
	alignable = (off == ((vm_offset_t)top & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use fubyte to make sure
		 * page is faulted in and read access allowed.
		 */
		if (fubyte(fromp) == -1)
			return (EFAULT);
		/*
		 * Map in the page and bcopy data in from it
		 */
		upa = pmap_extract(upmap, trunc_page(fromp));
		if (upa == 0)
			panic("mappedcopyin");
		len = min(count, PAGE_SIZE-off);
		pmap_enter(kernel_pmap, kva, upa, VM_PROT_READ, TRUE);
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(kva, top);
		else
			bcopy((caddr_t)(kva+off), top, len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
	return (0);
}

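/*
 * Copy data from kernel space to user space by temporarily mapping
 * each destination user page at CADDR2 and copying into that mapping.
 * subyte() is used to fault the page in and verify write access
 * before it is mapped.  Intended as a faster alternative to copyout()
 * for large transfers (presumably those of at least mappedcopysize
 * bytes).
 */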
mappedcopyout(fromp, top, count)
	register char *fromp, *top;
	register int count;
{
	register vm_offset_t kva, upa;
	register int off, len;
	int alignable;
	pmap_t upmap;
	extern caddr_t CADDR2;

	kva = (vm_offset_t) CADDR2;
	off = (vm_offset_t)top & PAGE_MASK;
	alignable = (off == ((vm_offset_t)fromp & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use subyte to make sure
		 * page is faulted in and write access allowed.
		 */
		if (subyte(top, *fromp) == -1)
			return (EFAULT);
		/*
		 * Map in the page and bcopy data out to it
		 */
		upa = pmap_extract(upmap, trunc_page(top));
		if (upa == 0)
			panic("mappedcopyout");
		len = min(count, PAGE_SIZE-off);
		pmap_enter(kernel_pmap, kva, upa,
			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(fromp, kva);
		else
			bcopy(fromp, (caddr_t)(kva+off), len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
	return (0);
}
#endif