/*	$OpenBSD: pmap.h,v 1.39 2024/11/07 05:24:43 jsg Exp $	*/
/*	$NetBSD: pmap.h,v 1.16 2001/04/22 23:19:30 thorpej Exp $	*/

/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#ifndef _LOCORE
#ifdef	_KERNEL
#include <sys/queue.h>
#endif
#include <sys/mutex.h>
#endif

/*
 * This scheme uses 2-level page tables in 32-bit mode and 3-level
 * page tables in 64-bit mode.
 *
 * While we're still in 32-bit mode we do the following:
 *
 *   offset:						13 bits
 * 1st level: 1024 64-bit TTEs in an 8K page for	10 bits
 * 2nd level: 512 32-bit pointers in the pmap for	 9 bits
 *							-------
 * total:						32 bits
 *
 * In 64-bit mode the Spitfire and Blackbird CPUs support only
 * 44-bit virtual addresses.  All addresses between
 * 0x0000 07ff ffff ffff and 0xffff f800 0000 0000 are in the
 * "VA hole" and trap, so we don't have to track them.  However,
 * we do need to keep them in mind during PT walking.  If the
 * size of the address "hole" ever changes, we need to rework
 * all the page table handling.
 *
 *   offset:						13 bits
 * 1st level: 1024 64-bit TTEs in an 8K page for	10 bits
 * 2nd level: 1024 64-bit pointers in an 8K page for	10 bits
 * 3rd level: 1024 64-bit pointers in the segmap for	10 bits
 *							-------
 * total:						43 bits
 *
 * Of course, this means that for 32-bit spaces we always have a
 * (practically) wasted page for the segmap (only one entry used)
 * and half a page wasted for the page directory.  We are still one
 * bit short of the full 44-bit space 8^(.
 */

#define HOLESHIFT	(43)

#define PTSZ	(PAGE_SIZE/8)
#define PDSZ	(PTSZ)
#define STSZ	(PTSZ)

#define PTSHIFT		(13)
#define	PDSHIFT		(10+PTSHIFT)
#define STSHIFT		(10+PDSHIFT)

#define PTMASK		(PTSZ-1)
#define PDMASK		(PDSZ-1)
#define STMASK		(STSZ-1)

#ifndef _LOCORE

#define va_to_seg(v)	(int)((((paddr_t)(v))>>STSHIFT)&STMASK)
#define va_to_dir(v)	(int)((((paddr_t)(v))>>PDSHIFT)&PDMASK)
#define va_to_pte(v)	(int)((((paddr_t)(v))>>PTSHIFT)&PTMASK)
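
/*
 * A worked example (purely illustrative; "va" is hypothetical): with
 * the standard 8K PAGE_SIZE, PTSZ == PDSZ == STSZ == 1024, so each
 * macro extracts a 10-bit index.  For va = 0x0000000401808000:
 *
 *	va_to_seg(va) == (va >> 33) & 0x3ff == 2
 *	va_to_dir(va) == (va >> 23) & 0x3ff == 3
 *	va_to_pte(va) == (va >> 13) & 0x3ff == 4
 *
 * i.e. segmap entry 2, page directory slot 3, page table entry 4,
 * offset 0 within the 8K page.
 */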

#ifdef	_KERNEL

/*
 * Support for big page sizes.  This maps the page size to the
 * page bits.
 */
struct page_size_map {
	u_int64_t mask;
	u_int64_t code;
#ifdef DEBUG
	u_int64_t use;
#endif
};
extern const struct page_size_map page_size_map[];

struct pmap {
	struct mutex pm_mtx;
	int pm_ctx;		/* Current context */
	int pm_refs;		/* ref count */
	/*
	 * This contains 64-bit pointers to pages that contain
	 * 1024 64-bit pointers to page tables.  All addresses
	 * are physical.
	 *
	 * !!! Only touch this through pseg_get() and pseg_set() !!!
	 */
	paddr_t pm_physaddr;	/* physical address of pm_segs */
	int64_t *pm_segs;

	struct pmap_statistics pm_stats;
};
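
/*
 * A minimal sketch of how pm_segs is intended to be used, assuming
 * the pseg_get()/pseg_set() helpers in pmap.c ("va" and "new_tte"
 * are hypothetical; the spare-page argument and return convention
 * follow pmap.c):
 *
 *	int64_t tte = pseg_get(pm, va);		// current TTE, 0 if unmapped
 *	if (pseg_set(pm, va, new_tte, 0) != 0) {
 *		// nonzero: an intermediate table page was needed
 *		// and no spare physical page was supplied
 *	}
 *
 * pm_segs itself holds physical addresses, so it must never be
 * dereferenced directly from C.
 */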

/*
 * This comes from the PROM and is used to map prom entries.
 */
struct prom_map {
	u_int64_t	vstart;
	u_int64_t	vsize;
	u_int64_t	tte;
};

#define PMAP_NC		0x001	/* Set the E bit in the page */
#define PMAP_NVC	0x002	/* Don't enable the virtual cache */
#define PMAP_NOCACHE	PMAP_NC
#define PMAP_LITTLE	0x004	/* Map in little endian mode */
/* Large page size hints -- we really should use another param to pmap_enter() */
#define PMAP_8K		0x000
#define PMAP_64K	0x008	/* Use 64K page */
#define PMAP_512K	0x010
#define PMAP_4M		0x018
#define PMAP_SZ_TO_TTE(x)	(((u_int64_t)(x)&0x018)<<58)
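
/*
 * Example: PMAP_SZ_TO_TTE() moves the size-hint bits (bits 3-4 of
 * the flags) up to bits 61-62, the page size field of the TTE data
 * word, e.g. PMAP_SZ_TO_TTE(PMAP_64K) == 0x008ULL << 58 == 1ULL << 61,
 * i.e. size code 1 (64K) in TTE bits <62:61>.
 */
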
/* If these bits differ between VAs mapping the same PA, the mappings alias in the d$ */
#define VA_ALIAS_ALIGN	(1<<14)
#define VA_ALIAS_MASK	(VA_ALIAS_ALIGN - 1)
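
/*
 * A sketch of the alias test (va1/va2 are hypothetical VAs mapping
 * the same physical page): the two mappings can conflict in the
 * data cache iff
 *
 *	((va1 ^ va2) & VA_ALIAS_MASK) != 0
 *
 * i.e. whenever the VAs are not congruent modulo VA_ALIAS_ALIGN (16K).
 */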

typedef	struct pmap *pmap_t;

extern struct pmap kernel_pmap_;
#define	pmap_kernel()	(&kernel_pmap_)

/* int pmap_change_wiring(pmap_t pm, vaddr_t va, boolean_t wired); */
#define	pmap_resident_count(pm)		((pm)->pm_stats.resident_count)
#define	pmap_update(pm)			/* nothing (yet) */

#define pmap_proc_iflush(p,va,len)	/* nothing */
#define pmap_init_percpu()		do { /* nothing */ } while (0)

void	pmap_bootstrap(u_long, u_long, u_int, u_int);
int	pmap_copyinsn(pmap_t, vaddr_t, uint32_t *);

/* make sure all page mappings are aligned modulo 16K to prevent d$ aliasing */
#define PMAP_PREFER
/* pmap prefer alignment */
#define PMAP_PREFER_ALIGN()	(VA_ALIAS_ALIGN)
/* pmap prefer offset within alignment */
#define PMAP_PREFER_OFFSET(of)	((of) & VA_ALIAS_MASK)
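
/*
 * A minimal sketch of how uvm is expected to use these hooks
 * ("hint" and "off" are hypothetical): pick a mapping address so the
 * VA is congruent to the object offset modulo the alias size, e.g.
 *
 *	va = roundup(hint, PMAP_PREFER_ALIGN()) + PMAP_PREFER_OFFSET(off);
 *
 * which gives (va & VA_ALIAS_MASK) == (off & VA_ALIAS_MASK), so all
 * mappings of the same object get the same d$ color.
 */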

#define PMAP_CHECK_COPYIN	CPU_ISSUN4V

#define PMAP_GROWKERNEL         /* turn on pmap_growkernel interface */

#define	__HAVE_PMAP_COLLECT

/* SPARC specific? */
int	pmap_dumpsize(void);
int	pmap_dumpmmu(int (*)(dev_t, daddr_t, caddr_t, size_t), daddr_t);
int	pmap_pa_exists(paddr_t);

/* SPARC64 specific */
int	ctx_alloc(struct pmap*);
void	ctx_free(struct pmap*);

#endif	/* _KERNEL */

/*
 * For each struct vm_page, there is a list of all currently valid virtual
 * mappings of that page.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		 pv_va;		/* virtual address for mapping */
} *pv_entry_t;
/* PV flags encoded in the low bits of the VA of the first pv_entry */

struct vm_page_md {
	struct mutex pvmtx;
	struct pv_entry pvent;
};

#define VM_MDPAGE_INIT(pg) do {			\
	mtx_init(&(pg)->mdpage.pvmtx, IPL_VM);	\
	(pg)->mdpage.pvent.pv_next = NULL;	\
	(pg)->mdpage.pvent.pv_pmap = NULL;	\
	(pg)->mdpage.pvent.pv_va = 0;		\
} while (0)
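
/*
 * A sketch of walking a page's pv list (pg is a hypothetical
 * struct vm_page *; the list may only be traversed with pvmtx held):
 *
 *	struct pv_entry *pv;
 *
 *	mtx_enter(&pg->mdpage.pvmtx);
 *	for (pv = &pg->mdpage.pvent; pv != NULL; pv = pv->pv_next) {
 *		if (pv->pv_pmap == NULL)
 *			continue;	// embedded entry, no mapping yet
 *		// pv->pv_pmap maps this page at pv->pv_va (remember the
 *		// first entry keeps PV flags in the low bits of pv_va)
 *	}
 *	mtx_leave(&pg->mdpage.pvmtx);
 */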

#endif	/* _LOCORE */
#endif	/* _MACHINE_PMAP_H_ */