xref: /original-bsd/sys/hp300/hp300/vm_machdep.c (revision 4f337619)
/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1982, 1986, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: vm_machdep.c 1.21 91/04/06$
 *
 *	@(#)vm_machdep.c	8.6 (Berkeley) 01/12/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/user.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <hp300/hp300/pte.h>

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the kernel stack and pcb, making the child
 * ready to run, and marking it so that it can return differently
 * than the parent.  Returns 1 in the child process, 0 in the parent.
 * We currently double-map the user area so that the stack is at the same
 * address in each process; in the future we will probably relocate
 * the frame pointers on the stack after copying.
 */
cpu_fork(p1, p2)
	register struct proc *p1, *p2;
{
	register struct user *up = p2->p_addr;
	int offset;
	extern caddr_t getsp();
	extern char kstack[];

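	/*
	 * The child inherits the parent's pointer to the saved user
	 * registers and its machine-dependent flags; the per-process
	 * AST and HP-UX trace bits are not carried over.
	 */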
	p2->p_md.md_regs = p1->p_md.md_regs;
	p2->p_md.md_flags = (p1->p_md.md_flags & ~(MDP_AST|MDP_HPUXTRACE));

	/*
	 * Copy pcb and stack from proc p1 to p2.
	 * We do this as cheaply as possible, copying only the active
	 * part of the stack.  The stack and pcb need to agree;
	 * this is tricky, as the final pcb is constructed by savectx,
	 * but its frame isn't yet on the stack when the stack is copied.
	 * switch compensates for this when the child eventually runs.
	 * This should be done differently, with a single call
	 * that copies and updates the pcb+stack,
	 * replacing the bcopy and savectx.
	 */
	p2->p_addr->u_pcb = p1->p_addr->u_pcb;
	offset = getsp() - kstack;
	bcopy((caddr_t)kstack + offset, (caddr_t)p2->p_addr + offset,
	    (unsigned) ctob(UPAGES) - offset);

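	/*
	 * Give the child its own MMU context in the pcb; the trailing 0
	 * says this is not the currently running process, so nothing is
	 * loaded into the hardware here.
	 */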
	PMAP_ACTIVATE(&p2->p_vmspace->vm_pmap, &up->u_pcb, 0);

	/*
	 * Arrange for a non-local goto when the new process
	 * is started, to resume here, returning nonzero from setjmp.
	 */
	if (savectx(up, 1)) {
		/*
		 * Return 1 in child.
		 */
		return (1);
	}
	return (0);
}

/*
 * cpu_exit is called as the last action during exit.
 * We release the address space and machine-dependent resources,
 * including the memory for the user structure and kernel stack.
 * Once finished, we call switch_exit, which switches to a temporary
 * pcb and stack and never returns.  We block memory allocation
 * until switch_exit has made things safe again.
 */
cpu_exit(p)
	struct proc *p;
{

	vmspace_free(p->p_vmspace);

	(void) splimp();
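	/*
	 * Free the u. area (user structure and kernel stack).  The
	 * splimp above keeps the memory from being reallocated before
	 * switch_exit gets us off of this stack.
	 */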
	kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
	switch_exit();
	/* NOTREACHED */
}

/*
 * Dump the machine specific header information at the start of a core dump.
 */
cpu_coredump(p, vp, cred)
	struct proc *p;
	struct vnode *vp;
	struct ucred *cred;
{
#ifdef HPUXCOMPAT
	/*
	 * If we loaded from an HP-UX format binary file we dump enough
	 * of an HP-UX style user struct so that the HP-UX debuggers can
	 * grok it.
	 */
	if (p->p_md.md_flags & MDP_HPUX)
		return (hpuxdumpu(vp, cred));
#endif
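	/*
	 * Otherwise dump the raw BSD u. area (user structure plus
	 * kernel stack) at the front of the core file.
	 */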
	return (vn_rdwr(UIO_WRITE, vp, (caddr_t) p->p_addr, ctob(UPAGES),
	    (off_t)0, UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT, cred, (int *) NULL,
	    p));
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLSIZE.
 */
pagemove(from, to, size)
	register caddr_t from, to;
	int size;
{
	register vm_offset_t pa;

#ifdef DEBUG
	if (size & CLOFSET)
		panic("pagemove");
#endif
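	/*
	 * Move one page at a time: look up the physical page behind
	 * the old address, remove that mapping, and enter the same
	 * physical page at the new address.
	 */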
	while (size > 0) {
		pa = pmap_extract(kernel_pmap, (vm_offset_t)from);
#ifdef DEBUG
		if (pa == 0)
			panic("pagemove 2");
		if (pmap_extract(kernel_pmap, (vm_offset_t)to) != 0)
			panic("pagemove 3");
#endif
		pmap_remove(kernel_pmap,
			    (vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
		pmap_enter(kernel_pmap,
			   (vm_offset_t)to, pa, VM_PROT_READ|VM_PROT_WRITE, 1);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Map `size' bytes of physical memory starting at `paddr' into
 * kernel VA space at `vaddr'.  Read/write and cache-inhibit status
 * are specified by `prot'.
 */
physaccess(vaddr, paddr, size, prot)
	caddr_t vaddr, paddr;
	register int size, prot;
{
	register struct pte *pte;
	register u_int page;

	pte = kvtopte(vaddr);
	page = (u_int)paddr & PG_FRAME;
	for (size = btoc(size); size; size--) {
		*(int *)pte++ = PG_V | prot | page;
		page += NBPG;
	}
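	/* Flush stale translations so the new mappings take effect. */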
	TBIAS();
}

physunaccess(vaddr, size)
	caddr_t vaddr;
	register int size;
{
	register struct pte *pte;

	pte = kvtopte(vaddr);
	for (size = btoc(size); size; size--)
		*(int *)pte++ = PG_NV;
	TBIAS();
}

/*
 * Set a red zone in the kernel stack after the u. area.
 * We don't support a redzone right now.  It really isn't clear
 * that it is a good idea since, if the kernel stack were to roll
 * into a write protected page, the processor would lock up (since
 * it cannot create an exception frame) and we would get no useful
 * post-mortem info.  Currently, under the DEBUG option, we just
 * check at every clock interrupt to see if the current k-stack has
 * gone too far (i.e. into the "redzone" page) and if so, panic.
 * Look at _lev6intr in locore.s for more details.
 */
/*ARGSUSED*/
setredzone(pte, vaddr)
	struct pte *pte;
	caddr_t vaddr;
{
}

/*
 * Convert kernel VA to physical address
 */
kvtop(addr)
	register caddr_t addr;
{
	vm_offset_t va;

	va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
	if (va == 0)
		panic("kvtop: zero page frame");
	return((int)va);
}

extern vm_map_t phys_map;

/*
 * Map an IO request into kernel virtual address space.
 *
 * XXX we allocate KVA space by using kmem_alloc_wait which we know
 * allocates space without backing physical memory.  This implementation
 * is a total crock, the multiple mappings of these physical pages should
 * be reflected in the higher-level VM structures to avoid problems.
 */
vmapbuf(bp)
	register struct buf *bp;
{
	register int npf;
	register caddr_t addr;
	register long flags = bp->b_flags;
	struct proc *p;
	int off;
	vm_offset_t kva;
	register vm_offset_t pa;

	if ((flags & B_PHYS) == 0)
		panic("vmapbuf");
	addr = bp->b_saveaddr = bp->b_data;
	off = (int)addr & PGOFSET;
	p = bp->b_proc;
	npf = btoc(round_page(bp->b_bcount + off));
	kva = kmem_alloc_wait(phys_map, ctob(npf));
	bp->b_data = (caddr_t)(kva + off);
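	/*
	 * Now map each of the user's physical pages, wired, at the
	 * newly allocated kernel addresses, using the user process'
	 * pmap to find the physical pages.
	 */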
	while (npf--) {
		pa = pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map),
		    (vm_offset_t)addr);
		if (pa == 0)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
		addr += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
}

/*
 * Free the io map PTEs associated with this IO operation.
 */
vunmapbuf(bp)
	register struct buf *bp;
{
	register caddr_t addr;
	register int npf;
	vm_offset_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = bp->b_data;
	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
	kva = (vm_offset_t)((int)addr & ~PGOFSET);
	kmem_free_wakeup(phys_map, kva, ctob(npf));
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}

#ifdef MAPPEDCOPY
u_int mappedcopysize = 4096;

mappedcopyin(fromp, top, count)
	register char *fromp, *top;
	register int count;
{
	register vm_offset_t kva, upa;
	register int off, len;
	int alignable;
	pmap_t upmap;
	extern caddr_t CADDR1;

	kva = (vm_offset_t) CADDR1;
	off = (vm_offset_t)fromp & PAGE_MASK;
	alignable = (off == ((vm_offset_t)top & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use fubyte to make sure
		 * page is faulted in and read access allowed.
		 */
		if (fubyte(fromp) == -1)
			return (EFAULT);
		/*
		 * Map in the page and bcopy data in from it
		 */
		upa = pmap_extract(upmap, trunc_page(fromp));
		if (upa == 0)
			panic("mappedcopyin");
		len = min(count, PAGE_SIZE-off);
		pmap_enter(kernel_pmap, kva, upa, VM_PROT_READ, TRUE);
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(kva, top);
		else
			bcopy((caddr_t)(kva+off), top, len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
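	/* Done copying; tear down the last temporary mapping at CADDR1. */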
	pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
	return (0);
}

mappedcopyout(fromp, top, count)
	register char *fromp, *top;
	register int count;
{
	register vm_offset_t kva, upa;
	register int off, len;
	int alignable;
	pmap_t upmap;
	extern caddr_t CADDR2;

	kva = (vm_offset_t) CADDR2;
	off = (vm_offset_t)top & PAGE_MASK;
	alignable = (off == ((vm_offset_t)fromp & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use subyte to make sure
		 * page is faulted in and write access allowed.
		 */
		if (subyte(top, *fromp) == -1)
			return (EFAULT);
		/*
		 * Map in the page and bcopy data out to it
		 */
		upa = pmap_extract(upmap, trunc_page(top));
		if (upa == 0)
			panic("mappedcopyout");
		len = min(count, PAGE_SIZE-off);
		pmap_enter(kernel_pmap, kva, upa,
			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(fromp, kva);
		else
			bcopy(fromp, (caddr_t)(kva+off), len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(kernel_pmap, kva, kva+PAGE_SIZE);
	return (0);
}
#endif