xref: /original-bsd/sys/hp300/hp300/vm_machdep.c (revision 331bfa8d)
1 /*
2  * Copyright (c) 1988 University of Utah.
3  * Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to Berkeley by
7  * the Systems Programming Group of the University of Utah Computer
8  * Science Department.
9  *
10  * %sccs.include.redist.c%
11  *
12  * from: Utah $Hdr: vm_machdep.c 1.18 89/08/23$
13  *
14  *	@(#)vm_machdep.c	7.7 (Berkeley) 12/16/90
15  */
16 
17 #include "sys/param.h"
18 #include "sys/systm.h"
19 #include "sys/user.h"
20 #include "sys/proc.h"
21 #include "sys/cmap.h"
22 #include "sys/malloc.h"
23 #include "sys/buf.h"
24 
25 #include "../include/cpu.h"
26 
27 #include "vm/vm_param.h"
28 #include "vm/pmap.h"
29 #include "vm/vm_map.h"
30 
31 /*
32  * Move pages from one kernel virtual address to another.
33  * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of CLBYTES.
35  */
36 pagemove(from, to, size)
37 	register caddr_t from, to;
38 	int size;
39 {
40 	register struct pte *fpte, *tpte;
41 
42 	if (size % CLBYTES)
43 		panic("pagemove");
44 	fpte = kvtopte(from);
45 	tpte = kvtopte(to);
46 	while (size > 0) {
47 		*tpte++ = *fpte;
48 		*(int *)fpte++ = PG_NV;
49 		TBIS(from);
50 		TBIS(to);
51 		from += NBPG;
52 		to += NBPG;
53 		size -= NBPG;
54 	}
55 	/* buffer pages not CI with new VM */
56 	DCIS();
57 }
58 
59 /*
60  * Set a red zone in the kernel stack after the u. area.
61  * We don't support a redzone right now.  It really isn't clear
62  * that it is a good idea since, if the kernel stack were to roll
63  * into a write protected page, the processor would lock up (since
64  * it cannot create an exception frame) and we would get no useful
65  * post-mortem info.  Currently, under the DEBUG option, we just
66  * check at every clock interrupt to see if the current k-stack has
67  * gone too far (i.e. into the "redzone" page) and if so, panic.
68  * Look at _lev6intr in locore.s for more details.
69  */
/*ARGSUSED*/
setredzone(pte, vaddr)
	struct pte *pte;
	caddr_t vaddr;
{
	/*
	 * Intentionally a no-op: no redzone is established.  The k-stack
	 * overflow check is done at clock-interrupt time instead (see the
	 * block comment above and _lev6intr in locore.s).
	 */
}
76 
77 /*
78  * Convert kernel VA to physical address
79  */
80 kvtop(addr)
81 	register caddr_t addr;
82 {
83 	vm_offset_t va;
84 
85 	va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
86 	if (va == 0)
87 		panic("kvtop: zero page frame");
88 	return((int)va);
89 }
90 
91 extern vm_map_t phys_map;
92 
93 /*
94  * Map an IO request into kernel virtual address space.  Requests fall into
 * one of five categories:
96  *
97  *	B_PHYS|B_UAREA:	User u-area swap.
98  *			Address is relative to start of u-area (p_addr).
99  *	B_PHYS|B_PAGET:	User page table swap.
100  *			Address is a kernel VA in usrpt (Usrptmap).
101  *	B_PHYS|B_DIRTY:	Dirty page push.
102  *			Address is a VA in proc2's address space.
103  *	B_PHYS|B_PGIN:	Kernel pagein of user pages.
104  *			Address is VA in user's address space.
105  *	B_PHYS:		User "raw" IO request.
106  *			Address is VA in user's address space.
107  *
 * All requests are (re)mapped into kernel VA space via the phys_map
 * (a name with only slightly more meaning than "kernelmap")
110  */
111 vmapbuf(bp)
112 	register struct buf *bp;
113 {
114 	register int npf;
115 	register caddr_t addr;
116 	register long flags = bp->b_flags;
117 	struct proc *p;
118 	int off;
119 	vm_offset_t kva;
120 	register vm_offset_t pa;
121 
122 	if ((flags & B_PHYS) == 0)
123 		panic("vmapbuf");
124 	addr = bp->b_saveaddr = bp->b_un.b_addr;
125 	off = (int)addr & PGOFSET;
126 	p = bp->b_proc;
127 	npf = btoc(round_page(bp->b_bcount + off));
128 	kva = kmem_alloc_wait(phys_map, ctob(npf));
129 	bp->b_un.b_addr = (caddr_t) (kva + off);
130 	while (npf--) {
131 		pa = pmap_extract(vm_map_pmap(p->p_map), (vm_offset_t)addr);
132 		if (pa == 0)
133 			panic("vmapbuf: null page frame");
134 		pmap_enter(vm_map_pmap(phys_map), kva, trunc_page(pa),
135 			   VM_PROT_READ|VM_PROT_WRITE, TRUE);
136 		addr += PAGE_SIZE;
137 		kva += PAGE_SIZE;
138 	}
139 }
140 
141 /*
142  * Free the io map PTEs associated with this IO operation.
143  * We also invalidate the TLB entries and restore the original b_addr.
144  */
145 vunmapbuf(bp)
146 	register struct buf *bp;
147 {
148 	register int npf;
149 	register caddr_t addr = bp->b_un.b_addr;
150 	vm_offset_t kva;
151 
152 	if ((bp->b_flags & B_PHYS) == 0)
153 		panic("vunmapbuf");
154 	npf = btoc(round_page(bp->b_bcount + ((int)addr & PGOFSET)));
155 	kva = (vm_offset_t)((int)addr & ~PGOFSET);
156 	kmem_free_wakeup(phys_map, kva, ctob(npf));
157 	bp->b_un.b_addr = bp->b_saveaddr;
158 	bp->b_saveaddr = NULL;
159 }
160