/*	$NetBSD: pmap.h,v 1.23 2002/09/22 07:19:45 chs Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#ifndef _LOCORE
#include <machine/pte.h>
#include <sys/queue.h>
#include <uvm/uvm_object.h>
#endif

/*
 * This scheme uses 2-level page tables in 32-bit mode and 3-level
 * page tables in 64-bit mode.
 *
 * While we're still in 32-bit mode we do the following:
 *
 *   offset:						13 bits
 * 1st level: 1024 64-bit TTEs in an 8K page for	10 bits
 * 2nd level: 512 32-bit pointers in the pmap for 	 9 bits
 *							-------
 * total:						32 bits
 *
 * In 64-bit mode the Spitfire and Blackbird CPUs support only
 * 44-bit virtual addresses.  All addresses between
 * 0x0000 07ff ffff ffff and 0xffff f800 0000 0000 are in the
 * "VA hole" and trap, so we don't have to track them.  However,
 * we do need to keep them in mind during PT walking.  If the
 * size of the address "hole" ever changes, we need to rework
 * all the page table handling.
 *
 *   offset:						13 bits
 * 1st level: 1024 64-bit TTEs in an 8K page for	10 bits
 * 2nd level: 1024 64-bit pointers in an 8K page for 	10 bits
 * 3rd level: 1024 64-bit pointers in the segmap for 	10 bits
 *							-------
 * total:						43 bits
 *
 * Of course, this means that for 32-bit spaces we always waste a
 * (practically) whole page on the segmap (only one entry used) and
 * half a page on the page directory.  We still need one extra bit 8^(.
 */

#define HOLESHIFT	(43)
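/*
 * From the layout comment above: the VA hole spans
 * [1UL << HOLESHIFT, 0UL - (1UL << HOLESHIFT)), i.e.
 * [0x0000080000000000, 0xfffff80000000000).
 */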

#define PTSZ	(NBPG/8)
#define PDSZ	(PTSZ)
#define STSZ	(PTSZ)

#define PTSHIFT		(13)
#define	PDSHIFT		(10+PTSHIFT)
#define STSHIFT		(10+PDSHIFT)

#define PTMASK		(PTSZ-1)
#define PDMASK		(PDSZ-1)
#define STMASK		(STSZ-1)
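
/*
 * Illustrative values, given the 8K pages described above (NBPG == 8192):
 * each level holds NBPG/8 == 1024 eight-byte entries, so PTSHIFT, PDSHIFT
 * and STSHIFT come out to 13, 23 and 33, and each mask to 0x3ff.
 */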

#ifndef _LOCORE

/*
 * Support for big page sizes.  This maps a page size to the
 * corresponding TTE page-size bits.
 */
struct page_size_map {
	u_int64_t mask;		/* page size mask */
	u_int64_t code;		/* TTE page-size bits */
#ifdef DEBUG
	u_int64_t use;		/* number of times this size was used */
#endif
};
extern struct page_size_map page_size_map[];
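
/*
 * Sketch of intended use (illustrative only; "va", "size" and "tte_data"
 * are hypothetical names, and the actual lookup lives elsewhere): scan
 * for the largest page size that fits, then OR its code into the TTE:
 *
 *	struct page_size_map *psm;
 *
 *	for (psm = page_size_map; psm->mask != 0; psm++)
 *		if ((va & psm->mask) == 0 && size >= psm->mask + 1)
 *			break;
 *	tte_data |= psm->code;
 *
 * This assumes the array is sorted largest-first, is terminated by a
 * zero-mask entry, and that mask == pagesize - 1.
 */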

/*
 * Pmap stuff
 */

/* Extract the per-level page table indices from a VA. */
#define va_to_seg(v)	(int)((((paddr_t)(v))>>STSHIFT)&STMASK)
#define va_to_dir(v)	(int)((((paddr_t)(v))>>PDSHIFT)&PDMASK)
#define va_to_pte(v)	(int)((((paddr_t)(v))>>PTSHIFT)&PTMASK)
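
/*
 * Worked example (hypothetical VA, values worked by hand):
 * for va == 0x0000001234567000,
 *
 *	va_to_seg(va) == (va >> 33) & 0x3ff == 0x009
 *	va_to_dir(va) == (va >> 23) & 0x3ff == 0x068
 *	va_to_pte(va) == (va >> 13) & 0x3ff == 0x2b3
 *
 * so the TTE for va sits at pm_segs[0x009] -> directory[0x068] ->
 * page table[0x2b3], with the low 13 bits left as the page offset.
 */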

struct pmap {
	struct uvm_object pm_obj;
#define pm_lock pm_obj.vmobjlock
#define pm_refs pm_obj.uo_refs
	LIST_ENTRY(pmap) pm_list;
	int pm_ctx;		/* Current context */

	/*
	 * This contains 64-bit pointers to pages that contain
	 * 1024 64-bit pointers to page tables.  All addresses
	 * are physical.
	 *
	 * !!! Only touch this through pseg_get() and pseg_set() !!!
	 */
	paddr_t pm_physaddr;	/* physical address of pm_segs */
	int64_t *pm_segs;
};

/*
 * This comes from the PROM and is used to map PROM entries.
 */
struct prom_map {
	u_int64_t	vstart;		/* virtual start of the mapping */
	u_int64_t	vsize;		/* length of the mapping in bytes */
	u_int64_t	tte;		/* TTE data mapping it */
};

#define PMAP_NC		0x001	/* Set the E bit in the page */
#define PMAP_NVC	0x002	/* Don't enable the virtual cache */
#define PMAP_LITTLE	0x004	/* Map in little endian mode */
/* Large page size hints --
   we really should use another param to pmap_enter() */
#define PMAP_8K		0x000	/* Use 8K page (the default) */
#define PMAP_64K	0x008	/* Use 64K page */
#define PMAP_512K	0x010	/* Use 512K page */
#define PMAP_4M		0x018	/* Use 4M page */
#define PMAP_SZ_TO_TTE(x)	(((x)&0x018)<<58)
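/*
 * E.g. PMAP_SZ_TO_TTE(PMAP_4M) yields 0x3ULL << 61: bits 62:61 of the
 * TTE data are the page size field, and 3 is the 4M encoding.
 * PMAP_8K yields 0, leaving the default 8K size.
 */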
/* If these bits differ between two VAs that map the same PA,
   the two mappings alias in the d$ */
#define VA_ALIAS_MASK   (1 << 14)

typedef	struct pmap *pmap_t;

#ifdef	_KERNEL
extern struct pmap kernel_pmap_;
#define	pmap_kernel()	(&kernel_pmap_)

int pmap_count_res __P((struct pmap *));
int pmap_count_wired __P((struct pmap *));
#define	pmap_resident_count(pm)		pmap_count_res((pm))
#define	pmap_wired_count(pm)		pmap_count_wired((pm))
#define	pmap_phys_address(x)		(x)

void pmap_activate_pmap(struct pmap *);

/*
 * If the pmap's reference count has dropped to zero, take a
 * reference back and activate the pmap now; otherwise it is
 * already active and there is nothing to do.
 */
static __inline void
pmap_update(struct pmap *pmap)
{

	if (pmap->pm_refs > 0) {
		return;
	}
	pmap->pm_refs = 1;
	pmap_activate_pmap(pmap);
}

void pmap_bootstrap __P((u_long kernelstart, u_long kernelend, u_int numctx));
/* make sure all page mappings match their PA modulo 16K to prevent d$ aliasing */
#define	PMAP_PREFER(pa, va)	(*(va)+=(((*(va))^(pa))&(1<<(PGSHIFT))))
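/*
 * Illustrative (hypothetical values, PGSHIFT == 13): for pa == 0x2000
 * and *va == 0x10000, ((*va)^(pa)) & 0x2000 == 0x2000, so *va becomes
 * 0x12000 and now agrees with pa in bit 13, avoiding the alias.
 */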

#define	PMAP_GROWKERNEL         /* turn on pmap_growkernel interface */
#define PMAP_NEED_PROCWR
#define __HAVE_PMAP_PREDESTROY
#define __HAVE_PMAP_ACTIVATE_KERNEL

struct proc;
void pmap_procwr(struct proc *, vaddr_t, size_t);

/* SPARC specific? */
int             pmap_dumpsize __P((void));
int             pmap_dumpmmu __P((int (*)__P((dev_t, daddr_t, caddr_t, size_t)),
                                 daddr_t));
int		pmap_pa_exists __P((paddr_t));
void		switchexit __P((struct proc *));

/* SPARC64 specific */
int	ctx_alloc __P((struct pmap *));
void	ctx_free __P((struct pmap *));

#endif	/* _KERNEL */

#endif	/* _LOCORE */
#endif	/* _MACHINE_PMAP_H_ */