xref: /netbsd/sys/arch/vax/include/pmap.h (revision c4a72b64)
1 /*	$NetBSD: pmap.h,v 1.56 2002/09/22 07:53:51 chs Exp $	   */
2 
3 /*
4  * Copyright (c) 1987 Carnegie-Mellon University
5  * Copyright (c) 1991 Regents of the University of California.
6  * All rights reserved.
7  *
8  * Changed for the VAX port. /IC
9  *
10  * This code is derived from software contributed to Berkeley by
11  * the Systems Programming Group of the University of Utah Computer
12  * Science Department.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. All advertising materials mentioning features or use of this software
23  *    must display the following acknowledgement:
24  *	This product includes software developed by the University of
25  *	California, Berkeley and its contributors.
26  * 4. Neither the name of the University nor the names of its contributors
27  *    may be used to endorse or promote products derived from this software
28  *    without specific prior written permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  *
42  *	@(#)pmap.h	7.6 (Berkeley) 5/10/91
43  */
44 
45 
46 #ifndef PMAP_H
47 #define PMAP_H
48 
49 #include <machine/pte.h>
50 #include <machine/mtpr.h>
51 #include <machine/pcb.h>
52 
/*
 * Some constants to make life easier.
 *
 * The MI (machine-independent) page is larger than the 512-byte VAX
 * hardware page; LTOHPS/LTOHPN give the shift and count relating them.
 */
#define LTOHPS		(PGSHIFT - VAX_PGSHIFT)	/* log2(VAX pages per MI page) */
#define LTOHPN		(1 << LTOHPS)		/* VAX hardware pages per MI page */
/* Size, in VAX ptes, of a full process page table (text+data+stack+mmap). */
#define PROCPTSIZE	((MAXTSIZ + MAXDSIZ + MAXSSIZ + MMAPSPACE) / VAX_NBPG)
/* Number of MI pages needed to hold one process' page table. */
#define	NPTEPGS		(PROCPTSIZE / (NBPG / (sizeof(struct pte) * LTOHPN)))
60 
61 /*
62  * Link struct if more than one process share pmap (like vfork).
63  * This is rarely used.
64  */
struct pm_share {
	struct pm_share	*ps_next;	/* next sharer; NULL terminates the list */
	struct pcb	*ps_pcb;	/* PCB of one process using this pmap */
};
69 
70 /*
71  * Pmap structure
72  *  pm_stack holds lowest allocated memory for the process stack.
73  */
74 
/*
 * Machine-dependent pmap: mirrors the VAX per-process memory-management
 * registers (P0BR/P0LR for the program region, P1BR/P1LR for the stack
 * region).  Note that pmap_t is a pointer typedef.
 */
typedef struct pmap {
	struct pte	*pm_p1ap;	/* Base of alloced p1 pte space */
	int		 pm_count;	/* reference count */
	struct pm_share	*pm_share;	/* PCBs using this pmap */
	struct pte	*pm_p0br;	/* page 0 base register */
	long		 pm_p0lr;	/* page 0 length register */
	struct pte	*pm_p1br;	/* page 1 base register */
	long		 pm_p1lr;	/* page 1 length register */
	u_char		*pm_pref;	/* pte reference count array */
	struct simplelock pm_lock;	/* Lock entry in MP environment */
	struct pmap_statistics	 pm_stats;	/* Some statistics */
} *pmap_t;
87 
88 /*
89  * For each struct vm_page, there is a list of all currently valid virtual
90  * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
91  */
92 
struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry */
	vaddr_t		 pv_vaddr;	/* address for this physical page */
	struct pmap	*pv_pmap;	/* pmap this entry belongs to */
	int		 pv_attr;	/* write/modified bits */
					/* (cached PG_V/PG_M attributes used
					 * by the fast paths below) */
};

/* One head entry per managed physical page, indexed by page frame number. */
extern	struct  pv_entry *pv_table;
101 
102 /* Mapping macros used when allocating SPT */
/*
 * Bump-allocate early (bootstrap-time) virtual or physical space and
 * leave the start address in `ptr'; `count' is in VAX hardware pages.
 *
 * NOTE(review): both macros expand to two statements with no
 * do { } while (0) wrapper, so they must not be used as the body of an
 * unbraced if/else.  MAPPHYS also ignores its `perm' argument —
 * presumably historical; confirm against callers before cleaning up.
 */
#define MAPVIRT(ptr, count)				\
	(vaddr_t)ptr = virtual_avail;			\
	virtual_avail += (count) * VAX_NBPG;

#define MAPPHYS(ptr, count, perm)			\
	(vaddr_t)ptr = avail_start + KERNBASE;		\
	avail_start += (count) * VAX_NBPG;
110 
#ifdef	_KERNEL

/* The one statically allocated pmap describing kernel (system) space. */
extern	struct pmap kernel_pmap_store;

#define pmap_kernel()			(&kernel_pmap_store)

#endif	/* _KERNEL */


/*
 * Real nice (fast) routines to get the virtual address of a physical page
 * (and vice versa).
 *
 * Kernel space is a direct alias of physical memory at KERNBASE, so the
 * translation is a single OR/AND-NOT.
 */
#define PMAP_MAP_POOLPAGE(pa)	((pa) | KERNBASE)
#define PMAP_UNMAP_POOLPAGE(va) ((va) & ~KERNBASE)

/* Tell UVM that pmap_bootstrap() grabs its own memory early. */
#define PMAP_STEAL_MEMORY
128 
129 /*
130  * This is the by far most used pmap routine. Make it inline.
131  */
/*
 * pmap_extract: look up the physical address backing virtual address va
 * in pmap.  Stores the result through pap (when non-NULL) and returns
 * TRUE iff va is mapped.  System space is resolved directly through the
 * KERNBASE alias; user space is resolved by walking the P0/P1 page
 * tables by hand.
 */
__inline static boolean_t
pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
{
	paddr_t pa = 0;
	int	*pte, sva;

	/* Kernel (system) space: direct translation. */
	if (va & KERNBASE) {
		pa = kvtophys(va); /* Is 0 if not mapped */
		if (pap)
			*pap = pa;
		if (pa)
			return (TRUE);
		return (FALSE);
	}

	sva = PG_PFNUM(va);	/* virtual page number within the region */
	if (va < 0x40000000) {
		/* P0 (program) region; AST bits are kept in the length
		 * register, so mask them off before the bounds check. */
		if (sva > (pmap->pm_p0lr & ~AST_MASK))
			return FALSE;
		pte = (int *)pmap->pm_p0br;
	} else {
		/* P1 (stack) region grows downward, hence the reversed
		 * '<' bounds check against the length register. */
		if (sva < pmap->pm_p1lr)
			return FALSE;
		pte = (int *)pmap->pm_p1br;
	}
	/*
	 * Check via the kernel pte that the page-table page holding
	 * pte[sva] is itself resident before dereferencing it.
	 */
	if (kvtopte(&pte[sva])->pg_pfn) {
		if (pap)
			*pap = (pte[sva] & PG_FRAME) << VAX_PGSHIFT;
		return (TRUE);
	}
	return (FALSE);
}
164 
165 boolean_t pmap_clear_modify_long(struct pv_entry *);
166 boolean_t pmap_clear_reference_long(struct pv_entry *);
167 boolean_t pmap_is_modified_long(struct pv_entry *);
168 void pmap_page_protect_long(struct pv_entry *, vm_prot_t);
169 void pmap_protect_long(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
170 
171 __inline static boolean_t
172 pmap_clear_reference(struct vm_page *pg)
173 {
174 	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
175 	boolean_t rv = (pv->pv_attr & PG_V) != 0;
176 
177 	pv->pv_attr &= ~PG_V;
178 	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
179 		rv |= pmap_clear_reference_long(pv);
180 	return rv;
181 }
182 
183 __inline static boolean_t
184 pmap_clear_modify(struct vm_page *pg)
185 {
186 	struct  pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
187 	boolean_t rv = (pv->pv_attr & PG_M) != 0;
188 
189 	pv->pv_attr &= ~PG_M;
190 	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
191 		rv |= pmap_clear_modify_long(pv);
192 	return rv;
193 }
194 
195 __inline static boolean_t
196 pmap_is_modified(struct vm_page *pg)
197 {
198 	struct pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
199 	if (pv->pv_attr & PG_M)
200 		return 1;
201 	else
202 		return pmap_is_modified_long(pv);
203 }
204 
205 __inline static void
206 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
207 {
208 	struct  pv_entry *pv = pv_table + (VM_PAGE_TO_PHYS(pg) >> PGSHIFT);
209 
210 	if (pv->pv_pmap != NULL || pv->pv_next != NULL)
211 		pmap_page_protect_long(pv, prot);
212 }
213 
/*
 * Change protection on a range of addresses in pmap.  Fast path: when
 * both user regions are empty (p0lr is 0 and p1lr holds 0x200000 —
 * NOTE(review): presumably the length-register value of an empty,
 * downward-growing P1 region; confirm) and the range is not in system
 * space, there is nothing to change, so the call to
 * pmap_protect_long() is skipped entirely.
 */
__inline static void
pmap_protect(pmap_t pmap, vaddr_t start, vaddr_t end, vm_prot_t prot)
{
	if (pmap->pm_p0lr != 0 || pmap->pm_p1lr != 0x200000 ||
	    (start & KERNBASE) != 0)
		pmap_protect_long(pmap, start, end, prot);
}
221 
/*
 * Hook called before all user mappings of a pmap are torn down.  The
 * VAX pmap maintains no per-pmap caches that would need flushing, so
 * this is a no-op.
 */
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	(void)pmap;	/* intentionally unused */
}
227 
/* Routines that are best to define as macros */
/* Convert a page frame number to its physical address. */
#define pmap_phys_address(phys)		((u_int)(phys) << PGSHIFT)
#define pmap_copy(a,b,c,d,e)		/* Dont do anything */
#define pmap_update(pmap)		/* nothing (yet) */
#define pmap_collect(pmap)		/* No need so far */
/* Removal is protection to VM_PROT_NONE; "slut" is Swedish for "end". */
#define pmap_remove(pmap, start, slut)	pmap_protect(pmap, start, slut, 0)
#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
/* NOTE(review): unlocked increment — presumably safe pre-MP; confirm. */
#define pmap_reference(pmap)		(pmap)->pm_count++
237 
238 /* These can be done as efficient inline macros */
/* These can be done as efficient inline macros */
/*
 * Both macros reach the target page through its system-space alias by
 * adding KERNBASE (0x80000000) to the physical address, then use the
 * VAX block-move instructions.  movc3/movc5 destroy r0-r5, hence the
 * clobber lists.  NOTE(review): the literal 4096 is presumably the MI
 * page size (NBPG) — confirm it tracks PGSHIFT.
 */
#define pmap_copy_page(src, dst)			\
	__asm__("addl3 $0x80000000,%0,%%r0;"		\
		"addl3 $0x80000000,%1,%%r1;"		\
		"movc3 $4096,(%%r0),(%%r1)"		\
	    :: "r"(src), "r"(dst)			\
	    : "r0","r1","r2","r3","r4","r5");

#define pmap_zero_page(phys)				\
	__asm__("addl3 $0x80000000,%0,%%r0;"		\
		"movc5 $0,(%%r0),$0,$4096,(%%r0)"	\
	    :: "r"(phys)				\
	    : "r0","r1","r2","r3","r4","r5");
251 
/* Prototypes */
void	pmap_bootstrap __P((void));	/* early machine-dependent VM setup */
vaddr_t pmap_map __P((vaddr_t, vaddr_t, vaddr_t, int));
255 
256 #endif /* PMAP_H */
257