/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
/*	All Rights Reserved   */

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * UNIX machine dependent virtual memory support.
 */

#include <sys/vm.h>
#include <sys/exec.h>
#include <sys/cmn_err.h>
#include <sys/cpu_module.h>
#include <sys/cpu.h>
#include <sys/elf_SPARC.h>
#include <sys/archsystm.h>
#include <vm/hat_sfmmu.h>
#include <sys/memnode.h>
#include <sys/mem_cage.h>
#include <vm/vm_dep.h>

#if defined(__sparcv9) && defined(SF_ERRATA_57)
caddr_t errata57_limit;
#endif

uint_t page_colors = 0;
uint_t page_colors_mask = 0;
uint_t page_coloring_shift = 0;
int consistent_coloring;

uint_t mmu_page_sizes = DEFAULT_MMU_PAGE_SIZES;
uint_t max_mmu_page_sizes = MMU_PAGE_SIZES;
uint_t mmu_hashcnt = DEFAULT_MAX_HASHCNT;
uint_t max_mmu_hashcnt = MAX_HASHCNT;
size_t mmu_ism_pagesize = DEFAULT_ISM_PAGESIZE;

/*
 * The sun4u hardware mapping sizes which will always be supported are
 * 8K, 64K, 512K and 4M.  If sun4u based machines need to support other
 * page sizes, platform or cpu specific routines need to modify the value.
 * The base pagesize (p_szc == 0) must always be supported by the hardware.
 */
int mmu_exported_pagesize_mask = (1 << TTE8K) | (1 << TTE64K) |
	(1 << TTE512K) | (1 << TTE4M);
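
/*
 * For example, a cpu module whose hardware also supports the optional
 * 32M and 256M mapping sizes might enable them with something like:
 *
 *	mmu_exported_pagesize_mask |= (1 << TTE32M) | (1 << TTE256M);
 *
 * (illustrative only; TTE32M and TTE256M are the sun4u TTE size codes
 * for 32M and 256M pages).
 */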
uint_t mmu_exported_page_sizes;

uint_t szc_2_userszc[MMU_PAGE_SIZES];
uint_t userszc_2_szc[MMU_PAGE_SIZES];

extern uint_t vac_colors_mask;
extern int vac_shift;

hw_pagesize_t hw_page_array[] = {
	{MMU_PAGESIZE, MMU_PAGESHIFT, 0, MMU_PAGESIZE >> MMU_PAGESHIFT},
	{MMU_PAGESIZE64K, MMU_PAGESHIFT64K, 0,
	    MMU_PAGESIZE64K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE512K, MMU_PAGESHIFT512K, 0,
	    MMU_PAGESIZE512K >> MMU_PAGESHIFT},
	{MMU_PAGESIZE4M, MMU_PAGESHIFT4M, 0, MMU_PAGESIZE4M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE32M, MMU_PAGESHIFT32M, 0,
	    MMU_PAGESIZE32M >> MMU_PAGESHIFT},
	{MMU_PAGESIZE256M, MMU_PAGESHIFT256M, 0,
	    MMU_PAGESIZE256M >> MMU_PAGESHIFT},
	{0, 0, 0, 0}
};
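
/*
 * The last field of each entry is the mapping size expressed in base
 * (8K) pages: for the 64K row, MMU_PAGESIZE64K >> MMU_PAGESHIFT is
 * 0x10000 >> 13 = 8 base pages, and for the 4M row it is
 * 0x400000 >> 13 = 512 base pages.
 */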

/*
 * Maximum page size used to map 64-bit memory segment kmem64_base..kmem64_end
 */
int	max_bootlp_tteszc = TTE4M;

/*
 * use_text_pgsz64K and use_text_pgsz512K allow the user to turn on these
 * additional text page sizes for USIII-IV+ and OPL by changing the default
 * values via /etc/system.
 */
int	use_text_pgsz64K = 0;
int	use_text_pgsz512K = 0;

/*
 * Maximum and default segment size tunables for user heap, stack, private
 * and shared anonymous memory, and user text and initialized data.
 */
size_t max_uheap_lpsize = MMU_PAGESIZE4M;
size_t default_uheap_lpsize = MMU_PAGESIZE;
size_t max_ustack_lpsize = MMU_PAGESIZE4M;
size_t default_ustack_lpsize = MMU_PAGESIZE;
size_t max_privmap_lpsize = MMU_PAGESIZE4M;
size_t max_uidata_lpsize = MMU_PAGESIZE;
size_t max_utext_lpsize = MMU_PAGESIZE4M;
size_t max_shm_lpsize = MMU_PAGESIZE4M;
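
/*
 * Each of these can be tuned from /etc/system; for example, to cap
 * user heap mappings at 512K instead of 4M one might set:
 *
 *	set max_uheap_lpsize = 0x80000
 */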

void
adjust_data_maxlpsize(size_t ismpagesize)
{
	if (max_uheap_lpsize == MMU_PAGESIZE4M) {
		max_uheap_lpsize = ismpagesize;
	}
	if (max_ustack_lpsize == MMU_PAGESIZE4M) {
		max_ustack_lpsize = ismpagesize;
	}
	if (max_privmap_lpsize == MMU_PAGESIZE4M) {
		max_privmap_lpsize = ismpagesize;
	}
	if (max_shm_lpsize == MMU_PAGESIZE4M) {
		max_shm_lpsize = ismpagesize;
	}
}
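
/*
 * Only tunables still at their 4M default are raised, so an explicit
 * /etc/system setting is never overridden.  A platform that supports a
 * larger ISM page size might, for instance, call
 * adjust_data_maxlpsize(MMU_PAGESIZE32M) during startup to let heap,
 * stack, and private and shared anonymous mappings use that size too.
 */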

/*
 * map_addr_proc() is the routine called when the system is to
 * choose an address for the user.  We will pick an address
 * range which is just below the current stack limit.  The
 * algorithm used for cache consistency on machines with virtual
 * address caches is such that offset 0 in the vnode is always
 * on a shm_alignment-aligned address.  Unfortunately, this
 * means that vnodes which are demand paged will not be mapped
 * cache consistently with the executable images.  When the
 * cache alignment for a given object is inconsistent, the
 * lower level code must manage the translations so that this
 * is not seen here (at the cost of efficiency, of course).
 *
 * addrp is a value/result parameter.
 *	On input it is a hint from the user to be used in a completely
 *	machine dependent fashion.  For MAP_ALIGN, addrp contains the
 *	minimal alignment.
 *
 *	On output it is NULL if no address can be found in the current
 *	process's address space or else an address that is currently
 *	not mapped for len bytes with a page of red zone on either side.
 *	If vacalign is true, then the selected address will obey the alignment
 *	constraints of a vac machine based on the given off value.
 */
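
/*
 * For example, a user process requesting a 4M-aligned anonymous
 * mapping would exercise the MAP_ALIGN path described above with:
 *
 *	addr = mmap((caddr_t)0x400000, len, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON | MAP_ALIGN, -1, 0);
 *
 * where the first argument carries the minimal alignment rather than
 * a fixed address.
 */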
/*ARGSUSED4*/
void
map_addr_proc(caddr_t *addrp, size_t len, offset_t off, int vacalign,
    caddr_t userlimit, struct proc *p, uint_t flags)
{
	struct as *as = p->p_as;
	caddr_t addr;
	caddr_t base;
	size_t slen;
	uintptr_t align_amount;
	int allow_largepage_alignment = 1;

	base = p->p_brkbase;
	if (userlimit < as->a_userlimit) {
		/*
		 * This happens when a program wants to map something in
		 * a range that's accessible to a program in a smaller
		 * address space.  For example, a 64-bit program might
		 * be calling mmap32(2) to guarantee that the returned
		 * address is below 4Gbytes.
		 */
		ASSERT(userlimit > base);
		slen = userlimit - base;
	} else {
		slen = p->p_usrstack - base - (((size_t)rctl_enforced_value(
		    rctlproc_legacy[RLIMIT_STACK], p->p_rctls, p) + PAGEOFFSET)
		    & PAGEMASK);
	}
	len = (len + PAGEOFFSET) & PAGEMASK;

	/*
	 * Redzone for each side of the request. This is done to leave
	 * one page unmapped between segments. This is not required, but
	 * it's useful for the user because if their program strays across
	 * a segment boundary, it will catch a fault immediately making
	 * debugging a little easier.
	 */
	len += (2 * PAGESIZE);
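
	/*
	 * For example, an 8K request on an 8K-pagesize machine searches
	 * for a 24K hole (8K plus one redzone page on each side); the
	 * address eventually returned skips the lower redzone page.
	 */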

	/*
	 *  If the request is larger than the size of a particular
	 *  mmu level, then we use that level to map the request.
	 *  But this requires that both the virtual and the physical
	 *  addresses be aligned with respect to that level, so we
	 *  do the virtual bit of nastiness here.
	 *
	 *  For 32-bit processes, only those which have specified
	 *  MAP_ALIGN or an addr will be aligned on a page size > 4MB. Otherwise
	 *  we can potentially waste up to 256MB of the 4G process address
	 *  space just for alignment.
	 */
	if (p->p_model == DATAMODEL_ILP32 && ((flags & MAP_ALIGN) == 0 ||
	    ((uintptr_t)*addrp) != 0)) {
		allow_largepage_alignment = 0;
	}
	if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE256M)) {	/* 256MB mappings */
		align_amount = MMU_PAGESIZE256M;
	} else if ((mmu_page_sizes == max_mmu_page_sizes) &&
	    allow_largepage_alignment &&
	    (len >= MMU_PAGESIZE32M)) {	/* 32MB mappings */
		align_amount = MMU_PAGESIZE32M;
	} else if (len >= MMU_PAGESIZE4M) {  /* 4MB mappings */
		align_amount = MMU_PAGESIZE4M;
	} else if (len >= MMU_PAGESIZE512K) { /* 512KB mappings */
		align_amount = MMU_PAGESIZE512K;
	} else if (len >= MMU_PAGESIZE64K) { /* 64KB mappings */
		align_amount = MMU_PAGESIZE64K;
	} else {
		/*
		 * Align virtual addresses on a 64K boundary to ensure
		 * that ELF shared libraries are mapped with the appropriate
		 * alignment constraints by the run-time linker.
		 */
		align_amount = ELF_SPARC_MAXPGSZ;
		if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp != 0) &&
		    ((uintptr_t)*addrp < align_amount))
			align_amount = (uintptr_t)*addrp;
	}
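
	/*
	 * To illustrate: a 6M request from a 64-bit process falls
	 * through the 256M and 32M checks and lands on the 4M case, so
	 * align_amount becomes MMU_PAGESIZE4M and the resulting mapping
	 * is eligible for 4M pages.
	 */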

	/*
	 * 64-bit processes require 1024K alignment of ELF shared libraries.
	 */
	if (p->p_model == DATAMODEL_LP64)
		align_amount = MAX(align_amount, ELF_SPARCV9_MAXPGSZ);
#ifdef VAC
	if (vac && vacalign && (align_amount < shm_alignment))
		align_amount = shm_alignment;
#endif

	if ((flags & MAP_ALIGN) && ((uintptr_t)*addrp > align_amount)) {
		align_amount = (uintptr_t)*addrp;
	}
	len += align_amount;

	/*
	 * Look for a large enough hole starting below the stack limit.
	 * After finding it, use the upper part.  Addition of PAGESIZE is
	 * for the redzone as described above.
	 */
	as_purge(as);
	if (as_gap(as, len, &base, &slen, AH_HI, NULL) == 0) {
		caddr_t as_addr;

		addr = base + slen - len + PAGESIZE;
		as_addr = addr;
		/*
		 * Round address DOWN to the alignment amount,
		 * add the offset, and if this address is less
		 * than the original address, add alignment amount.
		 */
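		/*
		 * For example, with align_amount 0x10000 (64K) and off
		 * 0x2000: a candidate addr of 0x5a000 rounds down to
		 * 0x50000, the offset brings it to 0x52000, and since
		 * that is below the candidate, 64K is added back,
		 * giving 0x62000.
		 */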
		addr = (caddr_t)((uintptr_t)addr & (~(align_amount - 1L)));
		addr += (long)(off & (align_amount - 1L));
		if (addr < as_addr) {
			addr += align_amount;
		}

		ASSERT(addr <= (as_addr + align_amount));
		ASSERT(((uintptr_t)addr & (align_amount - 1L)) ==
		    ((uintptr_t)(off & (align_amount - 1L))));
		*addrp = addr;

#if defined(SF_ERRATA_57)
		if (AS_TYPE_64BIT(as) && addr < errata57_limit) {
			*addrp = NULL;
		}
#endif
	} else {
		*addrp = NULL;	/* no more virtual space */
	}
}

/*
 * Platform-dependent page scrub call.
 */
void
pagescrub(page_t *pp, uint_t off, uint_t len)
{
	/*
	 * For now, we rely on the fact that pagezero() will
	 * always clear UEs.
	 */
	pagezero(pp, off, len);
}

/*ARGSUSED*/
void
sync_data_memory(caddr_t va, size_t len)
{
	cpu_flush_ecache();
}

/*
 * platform specific large pages for kernel heap support
 */
void
mmu_init_kcontext(void)
{
	extern void set_kcontextreg(void);

	if (kcontextreg)
		set_kcontextreg();
}

void
contig_mem_init(void)
{
	/* not applicable to sun4u */
}

size_t
exec_get_spslew(void)
{
	return (0);
}