/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vmparam.h     5.9 (Berkeley) 5/12/91
 *	from: FreeBSD: src/sys/i386/include/vmparam.h,v 1.33 2000/03/30
 * $FreeBSD$
 */

#ifndef	_MACHINE_VMPARAM_H_
#define	_MACHINE_VMPARAM_H_

/*
 * Virtual memory related constants, all in bytes
 */
#ifndef MAXTSIZ
#define	MAXTSIZ		(1*1024*1024*1024)	/* max text size */
#endif
#ifndef DFLDSIZ
#define	DFLDSIZ		(128*1024*1024)		/* initial data size limit */
#endif
#ifndef MAXDSIZ
#define	MAXDSIZ		(1*1024*1024*1024)	/* max data size */
#endif
#ifndef DFLSSIZ
#define	DFLSSIZ		(128*1024*1024)		/* initial stack size limit */
#endif
#ifndef MAXSSIZ
#define	MAXSSIZ		(1*1024*1024*1024)	/* max stack size */
#endif
#ifndef SGROWSIZ
#define	SGROWSIZ	(128*1024)		/* amount to grow stack */
#endif
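
/*
 * Worked example (illustrative, not from the original header): with the
 * defaults above, MAXTSIZ, MAXDSIZ and MAXSSIZ each evaluate to 1 GiB
 * (1 * 1024 * 1024 * 1024 = 1073741824 bytes), DFLDSIZ and DFLSSIZ to
 * 128 MiB, and SGROWSIZ to 128 KiB.  The #ifndef guards let a kernel
 * build override any of them, e.g. a hypothetical config entry such as
 *
 *	options	MAXDSIZ=(2UL*1024*1024*1024)
 *
 * would raise the maximum data size to 2 GiB.
 */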

/*
 * The physical address space is sparsely populated.
 */
#define	VM_PHYSSEG_SPARSE

/*
 * The number of PHYSSEG entries.
 */
#define	VM_PHYSSEG_MAX		64

/*
 * Create two free page pools: VM_FREEPOOL_DEFAULT is the default pool
 * from which physical pages are allocated and VM_FREEPOOL_DIRECT is
 * the pool from which physical pages for small UMA objects are
 * allocated.
 */
#define	VM_NFREEPOOL		2
#define	VM_FREEPOOL_DEFAULT	0
#define	VM_FREEPOOL_DIRECT	1

/*
 * Create one free page list: VM_FREELIST_DEFAULT is for all physical
 * pages.
 */
#define	VM_NFREELIST		1
#define	VM_FREELIST_DEFAULT	0

/*
 * An allocation size of 16MB is supported in order to optimize the
 * use of the direct map by UMA.  Specifically, a cache line contains
 * at most four TTEs, collectively mapping 16MB of physical memory.
 * By reducing the number of distinct 16MB "pages" that are used by UMA,
 * the physical memory allocator reduces the likelihood of both 4MB
 * page TLB misses and cache misses caused by 4MB page TLB misses.
 */
#define	VM_NFREEORDER		12
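
/*
 * Illustrative arithmetic (not from the original header): VM_NFREEORDER
 * of 12 gives buddy-queue orders 0 through 11, so the largest contiguous
 * allocation tracked by the physical allocator is 2^11 pages, which with
 * the 4 KiB base page size used here is 2048 * 4096 bytes = 8 MiB.  The
 * 16MB figure and TTE terminology above appear to date from the sparc64
 * version of this comment, which assumed 8 KiB pages.
 */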

/*
 * Enable superpage reservations: 1 level.
 */
#ifndef	VM_NRESERVLEVEL
#define	VM_NRESERVLEVEL		1
#endif

/*
 * Level 0 reservations consist of 512 pages.
 */
#ifndef	VM_LEVEL_0_ORDER
#define	VM_LEVEL_0_ORDER	9
#endif
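
/*
 * Illustrative arithmetic (not from the original header): a level 0
 * reservation spans 2^VM_LEVEL_0_ORDER = 2^9 = 512 base pages, i.e.
 * 512 * 4 KiB = 2 MiB, the size of an L2 block mapping with the 4 KiB
 * translation granule, so a fully populated reservation can be promoted
 * to a 2 MiB superpage.
 */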

/**
 * Address space layout.
 *
 * ARMv8 implements up to a 48 bit virtual address space. The address space is
 * split into 2 regions at each end of the 64 bit address space, with an
 * out of range "hole" in the middle.
 *
 * We use the full 48 bits for each region; however, the kernel may use only
 * a limited range within this space.
 *
 * Upper region:    0xffffffffffffffff  Top of virtual memory
 *
 *                  0xfffffeffffffffff  End of DMAP
 *                  0xffffa00000000000  Start of DMAP
 *
 *                  0xffff009fffffffff  End of KASAN shadow map
 *                  0xffff008000000000  Start of KASAN shadow map
 *
 *                  0xffff007fffffffff  End of KVA
 *                  0xffff000000000000  Kernel base address & start of KVA
 *
 * Hole:            0xfffeffffffffffff
 *                  0x0001000000000000
 *
 * Lower region:    0x0000ffffffffffff End of user address space
 *                  0x0000000000000000 Start of user address space
 *
 * We use the upper region for the kernel, and the lower region for userland.
 *
 * We define some interesting address constants:
 *
 * VM_MIN_ADDRESS and VM_MAX_ADDRESS define the start and end of the entire
 * 64 bit address space, mostly just for convenience.
 *
 * VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS define the start and end of
 * mappable kernel virtual address space.
 *
 * VM_MIN_USER_ADDRESS and VM_MAX_USER_ADDRESS define the start and end of the
 * user address space.
 */
#define	VM_MIN_ADDRESS		(0x0000000000000000UL)
#define	VM_MAX_ADDRESS		(0xffffffffffffffffUL)

/* 512 GiB of kernel addresses */
#define	VM_MIN_KERNEL_ADDRESS	(0xffff000000000000UL)
#define	VM_MAX_KERNEL_ADDRESS	(0xffff008000000000UL)

/* 128 GiB KASAN shadow map */
#define	KASAN_MIN_ADDRESS	(0xffff008000000000UL)
#define	KASAN_MAX_ADDRESS	(0xffff00a000000000UL)

/* The address bits that hold a pointer authentication code */
#define	PAC_ADDR_MASK		(0xff7f000000000000UL)

/* If true, addr is in the kernel address space */
#define	ADDR_IS_KERNEL(addr)	(((addr) & (1ul << 55)) == (1ul << 55))
/* If true, addr is in its canonical form (i.e. no TBI, PAC, etc.) */
#define	ADDR_IS_CANONICAL(addr)	\
    (((addr) & 0xffff000000000000UL) == 0 || \
     ((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)
#define	ADDR_MAKE_CANONICAL(addr) ({			\
	__typeof(addr) _tmp_addr = (addr);		\
							\
	_tmp_addr &= ~0xffff000000000000UL;		\
	if (ADDR_IS_KERNEL(addr))			\
		_tmp_addr |= 0xffff000000000000UL;	\
							\
	_tmp_addr;					\
})
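
/*
 * Illustrative examples (not from the original header), assuming 48-bit
 * virtual addresses where bits 63:48 may carry TBI tags or PAC bits:
 *
 *	ADDR_IS_KERNEL(0xffff000000100000UL)	-> true (bit 55 is set)
 *	ADDR_IS_KERNEL(0x0000000000100000UL)	-> false
 *	ADDR_IS_CANONICAL(0x1200000000100000UL)	-> false (tag 0x12 in 63:56)
 *	ADDR_MAKE_CANONICAL(0x1200000000100000UL)
 *	    == 0x0000000000100000UL	(bit 55 clear, so a user address)
 *	ADDR_MAKE_CANONICAL(0x34ff000000100000UL)
 *	    == 0xffff000000100000UL	(bit 55 set, so a kernel address)
 */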

/* 95 TiB maximum for the direct map region */
#define	DMAP_MIN_ADDRESS	(0xffffa00000000000UL)
#define	DMAP_MAX_ADDRESS	(0xffffff0000000000UL)
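
/*
 * Illustrative arithmetic (not from the original header): the virtual
 * range reserved above is 0xffffff0000000000 - 0xffffa00000000000 =
 * 0x5f0000000000 bytes = 95 TiB, which bounds how much physical memory
 * the direct map can cover.
 */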

#define	DMAP_MIN_PHYSADDR	(dmap_phys_base)
#define	DMAP_MAX_PHYSADDR	(dmap_phys_max)

/* True if pa is in the dmap range */
#define	PHYS_IN_DMAP(pa)	((pa) >= DMAP_MIN_PHYSADDR && \
    (pa) < DMAP_MAX_PHYSADDR)
/* True if va is in the dmap range */
#define	VIRT_IN_DMAP(va)	((va) >= DMAP_MIN_ADDRESS && \
    (va) < (dmap_max_addr))

#define	PMAP_HAS_DMAP	1
#define	PHYS_TO_DMAP(pa)						\
({									\
	KASSERT(PHYS_IN_DMAP(pa),					\
	    ("%s: PA out of range, PA: 0x%lx", __func__,		\
	    (vm_paddr_t)(pa)));						\
	((pa) - dmap_phys_base) + DMAP_MIN_ADDRESS;			\
})

#define	DMAP_TO_PHYS(va)						\
({									\
	KASSERT(VIRT_IN_DMAP(va),					\
	    ("%s: VA out of range, VA: 0x%lx", __func__,		\
	    (vm_offset_t)(va)));					\
	((va) - DMAP_MIN_ADDRESS) + dmap_phys_base;			\
})
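
/*
 * Illustrative usage (not from the original header), assuming pa lies in
 * [dmap_phys_base, dmap_phys_max):
 *
 *	vm_offset_t va = PHYS_TO_DMAP(pa);
 *	KASSERT(DMAP_TO_PHYS(va) == pa, ("DMAP round trip failed"));
 *
 * Both macros KASSERT that their argument is inside the direct map before
 * applying the linear offset against dmap_phys_base.
 */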

#define	VM_MIN_USER_ADDRESS	(0x0000000000000000UL)
#define	VM_MAX_USER_ADDRESS	(0x0001000000000000UL)

#define	VM_MINUSER_ADDRESS	(VM_MIN_USER_ADDRESS)
#define	VM_MAXUSER_ADDRESS	(VM_MAX_USER_ADDRESS)

#define	KERNBASE		(VM_MIN_KERNEL_ADDRESS)
#define	SHAREDPAGE		(VM_MAXUSER_ADDRESS - PAGE_SIZE)
#define	USRSTACK		SHAREDPAGE
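
/*
 * Illustrative arithmetic (not from the original header): with the default
 * 4 KiB PAGE_SIZE, SHAREDPAGE is 0x0001000000000000 - 0x1000 =
 * 0x0000fffffffff000, the last page of user space.  USRSTACK aliases that
 * address, so the main user stack grows downward from just below the
 * shared page.
 */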

/*
 * How many physical pages per kmem arena virtual page.
 */
#ifndef VM_KMEM_SIZE_SCALE
#define	VM_KMEM_SIZE_SCALE	(1)
#endif

/*
 * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the
 * kernel map.
 */
#ifndef VM_KMEM_SIZE_MAX
#define	VM_KMEM_SIZE_MAX	((VM_MAX_KERNEL_ADDRESS - \
    VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5)
#endif
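
/*
 * Illustrative arithmetic (not from the original header): the kernel map
 * spans 0xffff008000000000 - 0xffff000000000000 = 512 GiB, so the default
 * ceiling works out to roughly 512 GiB * 3 / 5, i.e. about 307 GiB for
 * the kmem arena.
 */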

/*
 * Initial pagein size, in pages, of the beginning of an executable file.
 */
#ifndef	VM_INITIAL_PAGEIN
#define	VM_INITIAL_PAGEIN	16
#endif

#if !defined(KASAN) && !defined(KMSAN)
#define	UMA_MD_SMALL_ALLOC
#endif

#ifndef LOCORE

extern vm_paddr_t dmap_phys_base;
extern vm_paddr_t dmap_phys_max;
extern vm_offset_t dmap_max_addr;
extern vm_offset_t vm_max_kernel_address;

#endif

#define	ZERO_REGION_SIZE	(64 * 1024)	/* 64KB */

#define	DEVMAP_MAX_VADDR	VM_MAX_KERNEL_ADDRESS

/*
 * The pmap can create non-transparent large page mappings.
 */
#define	PMAP_HAS_LARGEPAGES	1

/*
 * Need a page dump array for minidump.
 */
#define	MINIDUMP_PAGE_TRACKING	1

#endif /* !_MACHINE_VMPARAM_H_ */