xref: /illumos-gate/usr/src/uts/common/vm/seg_kmem.c (revision a98e9dbf)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/types.h>
29 #include <sys/t_lock.h>
30 #include <sys/param.h>
31 #include <sys/sysmacros.h>
32 #include <sys/tuneable.h>
33 #include <sys/systm.h>
34 #include <sys/vm.h>
35 #include <sys/kmem.h>
36 #include <sys/vmem.h>
37 #include <sys/mman.h>
38 #include <sys/cmn_err.h>
39 #include <sys/debug.h>
40 #include <sys/dumphdr.h>
41 #include <sys/bootconf.h>
42 #include <sys/lgrp.h>
43 #include <vm/seg_kmem.h>
44 #include <vm/hat.h>
45 #include <vm/page.h>
46 #include <vm/vm_dep.h>
47 #include <vm/faultcode.h>
48 #include <sys/promif.h>
49 #include <vm/seg_kp.h>
50 #include <sys/bitmap.h>
51 #include <sys/mem_cage.h>
52 
53 /*
54  * seg_kmem is the primary kernel memory segment driver.  It
55  * maps the kernel heap [kernelheap, ekernelheap), module text,
56  * and all memory which was allocated before the VM was initialized
57  * into kas.
58  *
59  * Pages which belong to seg_kmem are hashed into &kvp vnode at
60  * an offset equal to (u_offset_t)virt_addr, and have p_lckcnt >= 1.
61  * They must never be paged out since segkmem_fault() is a no-op to
62  * prevent recursive faults.
63  *
64  * Currently, seg_kmem pages are sharelocked (p_sharelock == 1) on
65  * __x86 and are unlocked (p_sharelock == 0) on __sparc.  Once __x86
66  * supports relocation the #ifdef kludges can be removed.
67  *
68  * seg_kmem pages may be subject to relocation by page_relocate(),
69  * provided that the HAT supports it; if this is so, segkmem_reloc
70  * will be set to a nonzero value. All boot time allocated memory as
71  * well as static memory is considered off limits to relocation.
72  * Pages are "relocatable" if p_state does not have P_NORELOC set, so
73  * we request P_NORELOC pages for memory that isn't safe to relocate.
74  *
75  * The kernel heap is logically divided up into four pieces:
76  *
77  *   heap32_arena is for allocations that require 32-bit absolute
78  *   virtual addresses (e.g. code that uses 32-bit pointers/offsets).
79  *
80  *   heap_core is for allocations that require 2GB *relative*
81  *   offsets; in other words all memory from heap_core is within
82  *   2GB of all other memory from the same arena. This is a requirement
83  *   of the addressing modes of some processors in supervisor code.
84  *
85  *   heap_arena is the general heap arena.
86  *
87  *   static_arena is the static memory arena.  Allocations from it
88  *   are not subject to relocation so it is safe to use the memory
89  *   physical address as well as the virtual address (e.g. the VA to
90  *   PA translations are static).  Caches may import from static_arena;
91  *   all other static memory allocations should use static_alloc_arena.
92  *
93  * On some platforms which have limited virtual address space, seg_kmem
94  * may share [kernelheap, ekernelheap) with seg_kp; if this is so,
95  * segkp_bitmap is non-NULL, and each bit represents a page of virtual
96  * address space which is actually seg_kp mapped.
97  */
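/*
 * A minimal sketch of the split described above (hypothetical cache name
 * and buffer type, not code from this file): a kmem cache whose buffers
 * need static VA -> PA translations imports from static_arena, while a
 * one-off static allocation goes straight to static_alloc_arena.
 *
 *	cache = kmem_cache_create("foo_static_cache", sizeof (foo_t), 0,
 *	    NULL, NULL, NULL, NULL, static_arena, 0);
 *	buf = vmem_alloc(static_alloc_arena, PAGESIZE, VM_SLEEP);
 */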
98 
99 extern ulong_t *segkp_bitmap;   /* Is set if segkp is from the kernel heap */
100 
101 char *kernelheap;		/* start of primary kernel heap */
102 char *ekernelheap;		/* end of primary kernel heap */
103 struct seg kvseg;		/* primary kernel heap segment */
104 struct seg kvseg_core;		/* "core" kernel heap segment */
105 struct seg kzioseg;		/* Segment for zio mappings */
106 vmem_t *heap_arena;		/* primary kernel heap arena */
107 vmem_t *heap_core_arena;	/* core kernel heap arena */
108 char *heap_core_base;		/* start of core kernel heap arena */
109 char *heap_lp_base;		/* start of kernel large page heap arena */
110 char *heap_lp_end;		/* end of kernel large page heap arena */
111 vmem_t *hat_memload_arena;	/* HAT translation data */
112 struct seg kvseg32;		/* 32-bit kernel heap segment */
113 vmem_t *heap32_arena;		/* 32-bit kernel heap arena */
114 vmem_t *heaptext_arena;		/* heaptext arena */
115 struct as kas;			/* kernel address space */
116 struct vnode kvp;		/* vnode for all segkmem pages */
117 struct vnode zvp;		/* vnode for zfs pages */
118 int segkmem_reloc;		/* enable/disable relocatable segkmem pages */
119 vmem_t *static_arena;		/* arena for caches to import static memory */
120 vmem_t *static_alloc_arena;	/* arena for allocating static memory */
121 vmem_t *zio_arena = NULL;	/* arena for allocating zio memory */
122 vmem_t *zio_alloc_arena = NULL;	/* arena for allocating zio memory */
123 
124 /*
125  * seg_kmem driver can map part of the kernel heap with large pages.
126  * Currently this functionality is implemented for sparc platforms only.
127  *
128  * The large page size "segkmem_lpsize" for kernel heap is selected in the
129  * platform specific code. It can also be modified via /etc/system file.
130  * Setting segkmem_lpsize to PAGESIZE in /etc/system disables usage of large
131  * pages for kernel heap. "segkmem_lpshift" is adjusted appropriately to
132  * match segkmem_lpsize.
133  *
134  * At boot time we carve from kernel heap arena a range of virtual addresses
135  * that will be used for large page mappings. This range [heap_lp_base,
136  * heap_lp_end) is set up as a separate vmem arena - "heap_lp_arena". We also
137  * create "kmem_lp_arena" that caches memory already backed up by large
138  * pages. kmem_lp_arena imports virtual segments from heap_lp_arena.
139  */
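/*
 * For example, on a sparc system with an 8K base page size, large pages
 * for the kernel heap could be disabled by adding the following
 * (illustrative value) to /etc/system:
 *
 *	set segkmem_lpsize = 0x2000
 */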
140 
141 size_t	segkmem_lpsize;
142 static  uint_t	segkmem_lpshift = PAGESHIFT;
143 int	segkmem_lpszc = 0;
144 
145 size_t  segkmem_kmemlp_quantum = 0x400000;	/* 4MB */
146 size_t  segkmem_heaplp_quantum;
147 vmem_t *heap_lp_arena;
148 static  vmem_t *kmem_lp_arena;
149 static  vmem_t *segkmem_ppa_arena;
150 static	segkmem_lpcb_t segkmem_lpcb;
151 
152 /*
153  * We use "segkmem_kmemlp_max" to limit the total amount of physical memory
154  * consumed by the large page heap. By default this parameter is set to 1/8 of
155  * physmem but can be adjusted through /etc/system either directly or
156  * indirectly by setting "segkmem_kmemlp_pcnt" to the percent of physmem
157  * we allow for large page heap.
158  */
159 size_t  segkmem_kmemlp_max;
160 static  uint_t  segkmem_kmemlp_pcnt;
161 
162 /*
163  * Getting large pages for the kernel heap can be problematic due to
164  * physical memory fragmentation. That's why we allow "segkmem_kmemlp_min"
165  * bytes to be preallocated at boot time.
166  */
167 static  size_t	segkmem_kmemlp_min;
168 
169 /*
170  * Throttling is used to avoid expensive attempts to allocate large pages
171  * for the kernel heap when many successive attempts to do so fail.
172  */
173 static  ulong_t segkmem_lpthrottle_max = 0x400000;
174 static  ulong_t segkmem_lpthrottle_start = 0x40;
175 static  ulong_t segkmem_use_lpthrottle = 1;
176 
177 /*
178  * Freed pages accumulate on a garbage list until segkmem is ready,
179  * at which point we call segkmem_gc() to free them all.
180  */
181 typedef struct segkmem_gc_list {
182 	struct segkmem_gc_list	*gc_next;
183 	vmem_t			*gc_arena;
184 	size_t			gc_size;
185 } segkmem_gc_list_t;
186 
187 static segkmem_gc_list_t *segkmem_gc_list;
188 
189 /*
190  * Allocations from the hat_memload arena add VM_MEMLOAD to their
191  * vmflags so that segkmem_xalloc() can inform the hat layer that it needs
192  * to take steps to prevent infinite recursion.  HAT allocations also
193  * must be non-relocatable to prevent recursive page faults.
194  */
195 static void *
196 hat_memload_alloc(vmem_t *vmp, size_t size, int flags)
197 {
198 	flags |= (VM_MEMLOAD | VM_NORELOC);
199 	return (segkmem_alloc(vmp, size, flags));
200 }
201 
202 /*
203  * Allocations from static_arena arena (or any other arena that uses
204  * segkmem_alloc_permanent()) require non-relocatable (permanently
205  * wired) memory pages, since these pages are referenced by physical
206  * as well as virtual address.
207  */
208 void *
209 segkmem_alloc_permanent(vmem_t *vmp, size_t size, int flags)
210 {
211 	return (segkmem_alloc(vmp, size, flags | VM_NORELOC));
212 }
213 
214 /*
215  * Initialize kernel heap boundaries.
216  */
217 void
218 kernelheap_init(
219 	void *heap_start,
220 	void *heap_end,
221 	char *first_avail,
222 	void *core_start,
223 	void *core_end)
224 {
225 	uintptr_t textbase;
226 	size_t core_size;
227 	size_t heap_size;
228 	vmem_t *heaptext_parent;
229 	size_t	heap_lp_size = 0;
230 #ifdef __sparc
231 	size_t kmem64_sz = kmem64_aligned_end - kmem64_base;
232 #endif	/* __sparc */
233 
234 	kernelheap = heap_start;
235 	ekernelheap = heap_end;
236 
237 #ifdef __sparc
238 	heap_lp_size = (((uintptr_t)heap_end - (uintptr_t)heap_start) / 4);
239 	/*
240 	 * Bias heap_lp start address by kmem64_sz to reduce collisions
241 	 * in 4M kernel TSB between kmem64 area and heap_lp
242 	 */
243 	kmem64_sz = P2ROUNDUP(kmem64_sz, MMU_PAGESIZE256M);
244 	if (kmem64_sz <= heap_lp_size / 2)
245 		heap_lp_size -= kmem64_sz;
246 	heap_lp_base = ekernelheap - heap_lp_size;
247 	heap_lp_end = heap_lp_base + heap_lp_size;
248 #endif	/* __sparc */
249 
250 	/*
251 	 * If this platform has a 'core' heap area, then the space for
252 	 * overflow module text should be carved out of the end of that
253 	 * heap.  Otherwise, it gets carved out of the general purpose
254 	 * heap.
255 	 */
256 	core_size = (uintptr_t)core_end - (uintptr_t)core_start;
257 	if (core_size > 0) {
258 		ASSERT(core_size >= HEAPTEXT_SIZE);
259 		textbase = (uintptr_t)core_end - HEAPTEXT_SIZE;
260 		core_size -= HEAPTEXT_SIZE;
261 	}
262 #ifndef __sparc
263 	else {
264 		ekernelheap -= HEAPTEXT_SIZE;
265 		textbase = (uintptr_t)ekernelheap;
266 	}
267 #endif
268 
269 	heap_size = (uintptr_t)ekernelheap - (uintptr_t)kernelheap;
270 	heap_arena = vmem_init("heap", kernelheap, heap_size, PAGESIZE,
271 	    segkmem_alloc, segkmem_free);
272 
273 	if (core_size > 0) {
274 		heap_core_arena = vmem_create("heap_core", core_start,
275 		    core_size, PAGESIZE, NULL, NULL, NULL, 0, VM_SLEEP);
276 		heap_core_base = core_start;
277 	} else {
278 		heap_core_arena = heap_arena;
279 		heap_core_base = kernelheap;
280 	}
281 
282 	/*
283 	 * Reserve space for the large page heap. If large pages for the
284 	 * kernel heap are enabled, the large page heap arena will be created
285 	 * later in the boot sequence in segkmem_heap_lp_init(). Otherwise the
286 	 * allocated range will be returned to heap_arena.
287 	 */
288 	if (heap_lp_size) {
289 		(void) vmem_xalloc(heap_arena, heap_lp_size, PAGESIZE, 0, 0,
290 		    heap_lp_base, heap_lp_end,
291 		    VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
292 	}
293 
294 	/*
295 	 * Remove the already-spoken-for memory range [kernelheap, first_avail).
296 	 */
297 	(void) vmem_xalloc(heap_arena, first_avail - kernelheap, PAGESIZE,
298 	    0, 0, kernelheap, first_avail, VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
299 
300 #ifdef __sparc
301 	heap32_arena = vmem_create("heap32", (void *)SYSBASE32,
302 	    SYSLIMIT32 - SYSBASE32 - HEAPTEXT_SIZE, PAGESIZE, NULL,
303 	    NULL, NULL, 0, VM_SLEEP);
304 
305 	textbase = SYSLIMIT32 - HEAPTEXT_SIZE;
306 	heaptext_parent = NULL;
307 #else	/* __sparc */
308 	heap32_arena = heap_core_arena;
309 	heaptext_parent = heap_core_arena;
310 #endif	/* __sparc */
311 
312 	heaptext_arena = vmem_create("heaptext", (void *)textbase,
313 	    HEAPTEXT_SIZE, PAGESIZE, NULL, NULL, heaptext_parent, 0, VM_SLEEP);
314 
315 	/*
316 	 * Create a set of arenas for memory with static translations
317 	 * (i.e. VA -> PA translations cannot change).  Since using
318 	 * kernel pages by physical address implies it isn't safe to
319 	 * walk across page boundaries, the static_arena quantum must
320 	 * be PAGESIZE.  Any kmem caches that require static memory
321 	 * should source from static_arena, while direct allocations
322 	 * should only use static_alloc_arena.
323 	 */
324 	static_arena = vmem_create("static", NULL, 0, PAGESIZE,
325 	    segkmem_alloc_permanent, segkmem_free, heap_arena, 0, VM_SLEEP);
326 	static_alloc_arena = vmem_create("static_alloc", NULL, 0,
327 	    sizeof (uint64_t), vmem_alloc, vmem_free, static_arena,
328 	    0, VM_SLEEP);
329 
330 	/*
331 	 * Create an arena for translation data (ptes, hmes, or hblks).
332 	 * We need an arena for this because hat_memload() is essential
333 	 * to vmem_populate() (see comments in common/os/vmem.c).
334 	 *
335 	 * Note: any kmem cache that allocates from hat_memload_arena
336 	 * must be created as a KMC_NOHASH cache (i.e. no external slab
337 	 * and bufctl structures to allocate) so that slab creation doesn't
338 	 * require anything more than a single vmem_alloc().
339 	 */
340 	hat_memload_arena = vmem_create("hat_memload", NULL, 0, PAGESIZE,
341 	    hat_memload_alloc, segkmem_free, heap_arena, 0,
342 	    VM_SLEEP | VMC_POPULATOR);
343 }
344 
345 void
346 boot_mapin(caddr_t addr, size_t size)
347 {
348 	caddr_t	 eaddr;
349 	page_t	*pp;
350 	pfn_t	 pfnum;
351 
352 	if (page_resv(btop(size), KM_NOSLEEP) == 0)
353 		panic("boot_mapin: page_resv failed");
354 
355 	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
356 		pfnum = va_to_pfn(addr);
357 		if (pfnum == PFN_INVALID)
358 			continue;
359 		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
360 			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);
361 
362 		/*
363 		 * We must break up any large pages that may have constituent
364 		 * pages being used for BOP_ALLOC()s before calling
365 		 * page_numtopp(). The locking code (i.e. page_reclaim())
366 		 * can't handle them.
367 		 */
368 		if (pp->p_szc != 0)
369 			page_boot_demote(pp);
370 
371 		pp = page_numtopp(pfnum, SE_EXCL);
372 		if (pp == NULL || PP_ISFREE(pp))
373 			panic("boot_alloc: pp is NULL or free");
374 
375 		/*
376 		 * If the cage is on but doesn't yet contain this page,
377 		 * mark it as non-relocatable.
378 		 */
379 		if (kcage_on && !PP_ISNORELOC(pp))
380 			PP_SETNORELOC(pp);
381 
382 		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
383 		pp->p_lckcnt = 1;
384 #if defined(__x86)
385 		page_downgrade(pp);
386 #else
387 		page_unlock(pp);
388 #endif
389 	}
390 }
391 
392 /*
393  * Get pages from boot and hash them into the kernel's vp.
394  * Used after page structs have been allocated, but before segkmem is ready.
395  */
396 void *
397 boot_alloc(void *inaddr, size_t size, uint_t align)
398 {
399 	caddr_t addr = inaddr;
400 
401 	if (bootops == NULL)
402 		prom_panic("boot_alloc: attempt to allocate memory after "
403 		    "BOP_GONE");
404 
405 	size = ptob(btopr(size));
406 	if (BOP_ALLOC(bootops, addr, size, align) != addr)
407 		panic("boot_alloc: BOP_ALLOC failed");
408 	boot_mapin((caddr_t)addr, size);
409 	return (addr);
410 }
411 
412 static void
413 segkmem_badop()
414 {
415 	panic("segkmem_badop");
416 }
417 
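/*
 * SEGKMEM_BADOP(t) casts segkmem_badop to a pointer to a function
 * returning type t, so any segment operation that seg_kmem does not
 * implement panics immediately if it is ever called.
 */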
418 #define	SEGKMEM_BADOP(t)	(t(*)())segkmem_badop
419 
420 /*ARGSUSED*/
421 static faultcode_t
422 segkmem_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t size,
423 	enum fault_type type, enum seg_rw rw)
424 {
425 	pgcnt_t npages;
426 	spgcnt_t pg;
427 	page_t *pp;
428 	struct vnode *vp = seg->s_data;
429 
430 	ASSERT(RW_READ_HELD(&seg->s_as->a_lock));
431 
432 	if (seg->s_as != &kas || size > seg->s_size ||
433 	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
434 		panic("segkmem_fault: bad args");
435 
436 	/*
437 	 * If it is one of segkp pages, call segkp_fault.
438 	 */
439 	if (segkp_bitmap && seg == &kvseg &&
440 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
441 		return (SEGOP_FAULT(hat, segkp, addr, size, type, rw));
442 
443 	if (rw != S_READ && rw != S_WRITE && rw != S_OTHER)
444 		return (FC_NOSUPPORT);
445 
446 	npages = btopr(size);
447 
448 	switch (type) {
449 	case F_SOFTLOCK:	/* lock down already-loaded translations */
450 		for (pg = 0; pg < npages; pg++) {
451 			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
452 			    SE_SHARED);
453 			if (pp == NULL) {
454 				/*
455 				 * Hmm, no page. Does a kernel mapping
456 				 * exist for it?
457 				 */
458 				if (!hat_probe(kas.a_hat, addr)) {
459 					addr -= PAGESIZE;
460 					while (--pg >= 0) {
461 						pp = page_find(vp, (u_offset_t)
462 						    (uintptr_t)addr);
463 						if (pp)
464 							page_unlock(pp);
465 						addr -= PAGESIZE;
466 					}
467 					return (FC_NOMAP);
468 				}
469 			}
470 			addr += PAGESIZE;
471 		}
472 		if (rw == S_OTHER)
473 			hat_reserve(seg->s_as, addr, size);
474 		return (0);
475 	case F_SOFTUNLOCK:
476 		while (npages--) {
477 			pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
478 			if (pp)
479 				page_unlock(pp);
480 			addr += PAGESIZE;
481 		}
482 		return (0);
483 	default:
484 		return (FC_NOSUPPORT);
485 	}
486 	/*NOTREACHED*/
487 }
488 
489 static int
490 segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
491 {
492 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
493 
494 	if (seg->s_as != &kas || size > seg->s_size ||
495 	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
496 		panic("segkmem_setprot: bad args");
497 
498 	/*
499 	 * If it is one of segkp pages, call segkp.
500 	 */
501 	if (segkp_bitmap && seg == &kvseg &&
502 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
503 		return (SEGOP_SETPROT(segkp, addr, size, prot));
504 
505 	if (prot == 0)
506 		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
507 	else
508 		hat_chgprot(kas.a_hat, addr, size, prot);
509 	return (0);
510 }
511 
512 /*
513  * This is a dummy segkmem function overloaded to call segkp
514  * when segkp is under the heap.
515  */
516 /* ARGSUSED */
517 static int
518 segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
519 {
520 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
521 
522 	if (seg->s_as != &kas)
523 		segkmem_badop();
524 
525 	/*
526 	 * If it is one of segkp pages, call into segkp.
527 	 */
528 	if (segkp_bitmap && seg == &kvseg &&
529 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
530 		return (SEGOP_CHECKPROT(segkp, addr, size, prot));
531 
532 	segkmem_badop();
533 	return (0);
534 }
535 
536 /*
537  * This is a dummy segkmem function overloaded to call segkp
538  * when segkp is under the heap.
539  */
540 /* ARGSUSED */
541 static int
542 segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
543 {
544 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
545 
546 	if (seg->s_as != &kas)
547 		segkmem_badop();
548 
549 	/*
550 	 * If it is one of segkp pages, call into segkp.
551 	 */
552 	if (segkp_bitmap && seg == &kvseg &&
553 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
554 		return (SEGOP_KLUSTER(segkp, addr, delta));
555 
556 	segkmem_badop();
557 	return (0);
558 }
559 
560 static void
561 segkmem_xdump_range(void *arg, void *start, size_t size)
562 {
563 	struct as *as = arg;
564 	caddr_t addr = start;
565 	caddr_t addr_end = addr + size;
566 
567 	while (addr < addr_end) {
568 		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
569 		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
570 			dump_addpage(as, addr, pfn);
571 		addr += PAGESIZE;
572 		dump_timeleft = dump_timeout;
573 	}
574 }
575 
576 static void
577 segkmem_dump_range(void *arg, void *start, size_t size)
578 {
579 	caddr_t addr = start;
580 	caddr_t addr_end = addr + size;
581 
582 	/*
583 	 * If we are about to start dumping the range of addresses we
584 	 * carved out of the kernel heap for the large page heap, walk
585 	 * heap_lp_arena to find which segments are actually populated.
586 	 */
587 	if (SEGKMEM_USE_LARGEPAGES &&
588 	    addr == heap_lp_base && addr_end == heap_lp_end &&
589 	    vmem_size(heap_lp_arena, VMEM_ALLOC) < size) {
590 		vmem_walk(heap_lp_arena, VMEM_ALLOC | VMEM_REENTRANT,
591 		    segkmem_xdump_range, arg);
592 	} else {
593 		segkmem_xdump_range(arg, start, size);
594 	}
595 }
596 
597 static void
598 segkmem_dump(struct seg *seg)
599 {
600 	/*
601 	 * The kernel's heap_arena (represented by kvseg) is a very large
602 	 * VA space, most of which is typically unused.  To speed up dumping
603 	 * we use vmem_walk() to quickly find the pieces of heap_arena that
604 	 * are actually in use.  We do the same for heap32_arena and
605 	 * heap_core.
606 	 *
607 	 * We specify VMEM_REENTRANT to vmem_walk() because dump_addpage()
608 	 * may ultimately need to allocate memory.  Reentrant walks are
609 	 * necessarily imperfect snapshots.  The kernel heap continues
610 	 * to change during a live crash dump, for example.  For a normal
611 	 * crash dump, however, we know that there won't be any other threads
612 	 * messing with the heap.  Therefore, at worst, we may fail to dump
613 	 * the pages that get allocated by the act of dumping; but we will
614 	 * always dump every page that was allocated when the walk began.
615 	 *
616 	 * The other segkmem segments are dense (fully populated), so there's
617 	 * no need to use this technique when dumping them.
618 	 *
619 	 * Note: when adding special dump handling for any new sparsely-
620 	 * populated segments, be sure to add similar handling to the ::kgrep
621 	 * code in mdb.
622 	 */
623 	if (seg == &kvseg) {
624 		vmem_walk(heap_arena, VMEM_ALLOC | VMEM_REENTRANT,
625 		    segkmem_dump_range, seg->s_as);
626 #ifndef __sparc
627 		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
628 		    segkmem_dump_range, seg->s_as);
629 #endif
630 	} else if (seg == &kvseg_core) {
631 		vmem_walk(heap_core_arena, VMEM_ALLOC | VMEM_REENTRANT,
632 		    segkmem_dump_range, seg->s_as);
633 	} else if (seg == &kvseg32) {
634 		vmem_walk(heap32_arena, VMEM_ALLOC | VMEM_REENTRANT,
635 		    segkmem_dump_range, seg->s_as);
636 		vmem_walk(heaptext_arena, VMEM_ALLOC | VMEM_REENTRANT,
637 		    segkmem_dump_range, seg->s_as);
638 	} else if (seg == &kzioseg) {
639 		/*
640 		 * We don't want to dump pages attached to kzioseg since they
641 		 * contain file data from ZFS.  If this page's segment is
642 		 * kzioseg, return instead of writing it to the dump device.
643 		 */
644 		return;
645 	} else {
646 		segkmem_dump_range(seg->s_as, seg->s_base, seg->s_size);
647 	}
648 }
649 
650 /*
651  * Lock/unlock kmem pages over a given range [addr, addr+len).
652  * Returns a shadow list of pages in ppp. If there are holes
653  * in the range (e.g. some of the kernel mappings do not have
654  * underlying page_ts), it returns ENOTSUP so that as_pagelock()
655  * will handle the range via as_fault(F_SOFTLOCK).
656  */
657 /*ARGSUSED*/
658 static int
659 segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
660 	page_t ***ppp, enum lock_type type, enum seg_rw rw)
661 {
662 	page_t **pplist, *pp;
663 	pgcnt_t npages;
664 	spgcnt_t pg;
665 	size_t nb;
666 	struct vnode *vp = seg->s_data;
667 
668 	ASSERT(ppp != NULL);
669 
670 	/*
671 	 * If it is one of segkp pages, call into segkp.
672 	 */
673 	if (segkp_bitmap && seg == &kvseg &&
674 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
675 		return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));
676 
677 	npages = btopr(len);
678 	nb = sizeof (page_t *) * npages;
679 
680 	if (type == L_PAGEUNLOCK) {
681 		pplist = *ppp;
682 		ASSERT(pplist != NULL);
683 
684 		for (pg = 0; pg < npages; pg++) {
685 			pp = pplist[pg];
686 			page_unlock(pp);
687 		}
688 		kmem_free(pplist, nb);
689 		return (0);
690 	}
691 
692 	ASSERT(type == L_PAGELOCK);
693 
694 	pplist = kmem_alloc(nb, KM_NOSLEEP);
695 	if (pplist == NULL) {
696 		*ppp = NULL;
697 		return (ENOTSUP);	/* take the slow path */
698 	}
699 
700 	for (pg = 0; pg < npages; pg++) {
701 		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
702 		if (pp == NULL) {
703 			while (--pg >= 0)
704 				page_unlock(pplist[pg]);
705 			kmem_free(pplist, nb);
706 			*ppp = NULL;
707 			return (ENOTSUP);
708 		}
709 		pplist[pg] = pp;
710 		addr += PAGESIZE;
711 	}
712 
713 	*ppp = pplist;
714 	return (0);
715 }
716 
717 /*
718  * This is a dummy segkmem function overloaded to call segkp
719  * when segkp is under the heap.
720  */
721 /* ARGSUSED */
722 static int
723 segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
724 {
725 	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));
726 
727 	if (seg->s_as != &kas)
728 		segkmem_badop();
729 
730 	/*
731 	 * If it is one of segkp pages, call into segkp.
732 	 */
733 	if (segkp_bitmap && seg == &kvseg &&
734 	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
735 		return (SEGOP_GETMEMID(segkp, addr, memidp));
736 
737 	segkmem_badop();
738 	return (0);
739 }
740 
741 /*ARGSUSED*/
742 static lgrp_mem_policy_info_t *
743 segkmem_getpolicy(struct seg *seg, caddr_t addr)
744 {
745 	return (NULL);
746 }
747 
748 /*ARGSUSED*/
749 static int
750 segkmem_capable(struct seg *seg, segcapability_t capability)
751 {
752 	if (capability == S_CAPABILITY_NOMINFLT)
753 		return (1);
754 	return (0);
755 }
756 
757 static struct seg_ops segkmem_ops = {
758 	SEGKMEM_BADOP(int),		/* dup */
759 	SEGKMEM_BADOP(int),		/* unmap */
760 	SEGKMEM_BADOP(void),		/* free */
761 	segkmem_fault,
762 	SEGKMEM_BADOP(faultcode_t),	/* faulta */
763 	segkmem_setprot,
764 	segkmem_checkprot,
765 	segkmem_kluster,
766 	SEGKMEM_BADOP(size_t),		/* swapout */
767 	SEGKMEM_BADOP(int),		/* sync */
768 	SEGKMEM_BADOP(size_t),		/* incore */
769 	SEGKMEM_BADOP(int),		/* lockop */
770 	SEGKMEM_BADOP(int),		/* getprot */
771 	SEGKMEM_BADOP(u_offset_t),	/* getoffset */
772 	SEGKMEM_BADOP(int),		/* gettype */
773 	SEGKMEM_BADOP(int),		/* getvp */
774 	SEGKMEM_BADOP(int),		/* advise */
775 	segkmem_dump,
776 	segkmem_pagelock,
777 	SEGKMEM_BADOP(int),		/* setpgsz */
778 	segkmem_getmemid,
779 	segkmem_getpolicy,		/* getpolicy */
780 	segkmem_capable,		/* capable */
781 };
782 
783 int
784 segkmem_zio_create(struct seg *seg)
785 {
786 	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
787 	seg->s_ops = &segkmem_ops;
788 	seg->s_data = &zvp;
789 	kas.a_size += seg->s_size;
790 	return (0);
791 }
792 
793 int
794 segkmem_create(struct seg *seg)
795 {
796 	ASSERT(seg->s_as == &kas && RW_WRITE_HELD(&kas.a_lock));
797 	seg->s_ops = &segkmem_ops;
798 	seg->s_data = &kvp;
799 	kas.a_size += seg->s_size;
800 	return (0);
801 }
802 
803 /*ARGSUSED*/
804 page_t *
805 segkmem_page_create(void *addr, size_t size, int vmflag, void *arg)
806 {
807 	struct seg kseg;
808 	int pgflags;
809 	struct vnode *vp = arg;
810 
811 	if (vp == NULL)
812 		vp = &kvp;
813 
814 	kseg.s_as = &kas;
815 	pgflags = PG_EXCL;
816 
817 	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
818 		pgflags |= PG_NORELOC;
819 	if ((vmflag & VM_NOSLEEP) == 0)
820 		pgflags |= PG_WAIT;
821 	if (vmflag & VM_PANIC)
822 		pgflags |= PG_PANIC;
823 	if (vmflag & VM_PUSHPAGE)
824 		pgflags |= PG_PUSHPAGE;
825 
826 	return (page_create_va(vp, (u_offset_t)(uintptr_t)addr, size,
827 	    pgflags, &kseg, addr));
828 }
829 
830 /*
831  * Allocate pages to back the virtual address range [addr, addr + size).
832  * If addr is NULL, allocate the virtual address space as well.
833  */
834 void *
835 segkmem_xalloc(vmem_t *vmp, void *inaddr, size_t size, int vmflag, uint_t attr,
836 	page_t *(*page_create_func)(void *, size_t, int, void *), void *pcarg)
837 {
838 	page_t *ppl;
839 	caddr_t addr = inaddr;
840 	pgcnt_t npages = btopr(size);
841 	int allocflag;
842 
843 	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
844 		return (NULL);
845 
846 	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
847 
848 	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
849 		if (inaddr == NULL)
850 			vmem_free(vmp, addr, size);
851 		return (NULL);
852 	}
853 
854 	ppl = page_create_func(addr, size, vmflag, pcarg);
855 	if (ppl == NULL) {
856 		if (inaddr == NULL)
857 			vmem_free(vmp, addr, size);
858 		page_unresv(npages);
859 		return (NULL);
860 	}
861 
862 	/*
863 	 * Under certain conditions, we need to let the HAT layer know
864 	 * that it cannot safely allocate memory.  Allocations from
865 	 * the hat_memload vmem arena always need this, to prevent
866 	 * infinite recursion.
867 	 *
868 	 * In addition, the x86 hat cannot safely do memory
869 	 * allocations while in vmem_populate(), because there
870 	 * is no simple bound on its usage.
871 	 */
872 	if (vmflag & VM_MEMLOAD)
873 		allocflag = HAT_NO_KALLOC;
874 #if defined(__x86)
875 	else if (vmem_is_populator())
876 		allocflag = HAT_NO_KALLOC;
877 #endif
878 	else
879 		allocflag = 0;
880 
881 	while (ppl != NULL) {
882 		page_t *pp = ppl;
883 		page_sub(&ppl, pp);
884 		ASSERT(page_iolock_assert(pp));
885 		ASSERT(PAGE_EXCL(pp));
886 		page_io_unlock(pp);
887 		hat_memload(kas.a_hat, (caddr_t)(uintptr_t)pp->p_offset, pp,
888 		    (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
889 		    HAT_LOAD_LOCK | allocflag);
890 		pp->p_lckcnt = 1;
891 #if defined(__x86)
892 		page_downgrade(pp);
893 #else
894 		if (vmflag & SEGKMEM_SHARELOCKED)
895 			page_downgrade(pp);
896 		else
897 			page_unlock(pp);
898 #endif
899 	}
900 
901 	return (addr);
902 }
903 
904 static void *
905 segkmem_alloc_vn(vmem_t *vmp, size_t size, int vmflag, struct vnode *vp)
906 {
907 	void *addr;
908 	segkmem_gc_list_t *gcp, **prev_gcpp;
909 
910 	ASSERT(vp != NULL);
911 
912 	if (kvseg.s_base == NULL) {
913 #ifndef __sparc
914 		if (bootops->bsys_alloc == NULL)
915 			halt("Memory allocation between bop_alloc() and "
916 			    "kmem_alloc().\n");
917 #endif
918 
919 		/*
920 		 * There's not a lot of memory to go around during boot,
921 		 * so recycle it if we can.
922 		 */
923 		for (prev_gcpp = &segkmem_gc_list; (gcp = *prev_gcpp) != NULL;
924 		    prev_gcpp = &gcp->gc_next) {
925 			if (gcp->gc_arena == vmp && gcp->gc_size == size) {
926 				*prev_gcpp = gcp->gc_next;
927 				return (gcp);
928 			}
929 		}
930 
931 		addr = vmem_alloc(vmp, size, vmflag | VM_PANIC);
932 		if (boot_alloc(addr, size, BO_NO_ALIGN) != addr)
933 			panic("segkmem_alloc: boot_alloc failed");
934 		return (addr);
935 	}
936 	return (segkmem_xalloc(vmp, NULL, size, vmflag, 0,
937 	    segkmem_page_create, vp));
938 }
939 
940 void *
941 segkmem_alloc(vmem_t *vmp, size_t size, int vmflag)
942 {
943 	return (segkmem_alloc_vn(vmp, size, vmflag, &kvp));
944 }
945 
946 void *
947 segkmem_zio_alloc(vmem_t *vmp, size_t size, int vmflag)
948 {
949 	return (segkmem_alloc_vn(vmp, size, vmflag, &zvp));
950 }
951 
952 /*
953  * Any changes to this routine must also be carried over to
954  * devmap_free_pages() in the seg_dev driver. This is because
955  * we currently don't have a special kernel segment for non-paged
956  * kernel memory that is exported by drivers to user space.
957  */
958 static void
959 segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
960     void (*func)(page_t *))
961 {
962 	page_t *pp;
963 	caddr_t addr = inaddr;
964 	caddr_t eaddr;
965 	pgcnt_t npages = btopr(size);
966 
967 	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
968 	ASSERT(vp != NULL);
969 
970 	if (kvseg.s_base == NULL) {
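		/*
		 * Too early to free pages for real: reuse the buffer itself
		 * as a segkmem_gc_list_t node and queue it for segkmem_gc()
		 * to free once segkmem is ready.
		 */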
971 		segkmem_gc_list_t *gc = inaddr;
972 		gc->gc_arena = vmp;
973 		gc->gc_size = size;
974 		gc->gc_next = segkmem_gc_list;
975 		segkmem_gc_list = gc;
976 		return;
977 	}
978 
979 	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
980 
981 	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
982 #if defined(__x86)
983 		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
984 		if (pp == NULL)
985 			panic("segkmem_free: page not found");
986 		if (!page_tryupgrade(pp)) {
987 			/*
988 			 * Some other thread has a sharelock. Wait for
989 			 * it to drop the lock so we can free this page.
990 			 */
991 			page_unlock(pp);
992 			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
993 			    SE_EXCL);
994 		}
995 #else
996 		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
997 #endif
998 		if (pp == NULL)
999 			panic("segkmem_free: page not found");
1000 		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
1001 		pp->p_lckcnt = 0;
1002 		if (func)
1003 			func(pp);
1004 		else
1005 			page_destroy(pp, 0);
1006 	}
1007 	if (func == NULL)
1008 		page_unresv(npages);
1009 
1010 	if (vmp != NULL)
1011 		vmem_free(vmp, inaddr, size);
1012 
1013 }
1014 
1015 void
1016 segkmem_xfree(vmem_t *vmp, void *inaddr, size_t size, void (*func)(page_t *))
1017 {
1018 	segkmem_free_vn(vmp, inaddr, size, &kvp, func);
1019 }
1020 
1021 void
1022 segkmem_free(vmem_t *vmp, void *inaddr, size_t size)
1023 {
1024 	segkmem_free_vn(vmp, inaddr, size, &kvp, NULL);
1025 }
1026 
1027 void
1028 segkmem_zio_free(vmem_t *vmp, void *inaddr, size_t size)
1029 {
1030 	segkmem_free_vn(vmp, inaddr, size, &zvp, NULL);
1031 }
1032 
1033 void
1034 segkmem_gc(void)
1035 {
1036 	ASSERT(kvseg.s_base != NULL);
1037 	while (segkmem_gc_list != NULL) {
1038 		segkmem_gc_list_t *gc = segkmem_gc_list;
1039 		segkmem_gc_list = gc->gc_next;
1040 		segkmem_free(gc->gc_arena, gc, gc->gc_size);
1041 	}
1042 }
1043 
1044 /*
1045  * Legacy entry points from here to end of file.
1046  */
1047 void
1048 segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
1049     pfn_t pfn, uint_t flags)
1050 {
1051 	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1052 	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
1053 	    flags | HAT_LOAD_LOCK);
1054 }
1055 
1056 void
1057 segkmem_mapout(struct seg *seg, void *addr, size_t size)
1058 {
1059 	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1060 }
1061 
1062 void *
1063 kmem_getpages(pgcnt_t npages, int kmflag)
1064 {
1065 	return (kmem_alloc(ptob(npages), kmflag));
1066 }
1067 
1068 void
1069 kmem_freepages(void *addr, pgcnt_t npages)
1070 {
1071 	kmem_free(addr, ptob(npages));
1072 }
1073 
1074 /*
1075  * segkmem_page_create_large() allocates a large page to be used for the kmem
1076  * caches. If kpr is enabled we ask for a relocatable page unless requested
1077  * otherwise. If kpr is disabled we have to ask for a non-reloc page.
1078  */
1079 static page_t *
1080 segkmem_page_create_large(void *addr, size_t size, int vmflag, void *arg)
1081 {
1082 	int pgflags;
1083 
1084 	pgflags = PG_EXCL;
1085 
1086 	if (segkmem_reloc == 0 || (vmflag & VM_NORELOC))
1087 		pgflags |= PG_NORELOC;
1088 	if (!(vmflag & VM_NOSLEEP))
1089 		pgflags |= PG_WAIT;
1090 	if (vmflag & VM_PUSHPAGE)
1091 		pgflags |= PG_PUSHPAGE;
1092 
1093 	return (page_create_va_large(&kvp, (u_offset_t)(uintptr_t)addr, size,
1094 	    pgflags, &kvseg, addr, arg));
1095 }
1096 
1097 /*
1098  * Allocate a large page to back the virtual address range
1099  * [addr, addr + size).  If addr is NULL, allocate the virtual address
1100  * space as well.
1101  */
1102 static void *
1103 segkmem_xalloc_lp(vmem_t *vmp, void *inaddr, size_t size, int vmflag,
1104     uint_t attr, page_t *(*page_create_func)(void *, size_t, int, void *),
1105     void *pcarg)
1106 {
1107 	caddr_t addr = inaddr, pa;
1108 	size_t  lpsize = segkmem_lpsize;
1109 	pgcnt_t npages = btopr(size);
1110 	pgcnt_t nbpages = btop(lpsize);
1111 	pgcnt_t nlpages = size >> segkmem_lpshift;
1112 	size_t  ppasize = nbpages * sizeof (page_t *);
1113 	page_t *pp, *rootpp, **ppa, *pplist = NULL;
1114 	int i;
1115 
1116 	vmflag |= VM_NOSLEEP;
1117 
1118 	if (page_resv(npages, vmflag & VM_KMFLAGS) == 0) {
1119 		return (NULL);
1120 	}
1121 
1122 	/*
1123 	 * Allocate the array we need for hat_memload_array().
1124 	 * We use a separate arena to avoid recursion.
1125 	 * We will not need this array once hat_memload_array() learns pp++.
1126 	 */
1127 	if ((ppa = vmem_alloc(segkmem_ppa_arena, ppasize, vmflag)) == NULL) {
1128 		goto fail_array_alloc;
1129 	}
1130 
1131 	if (inaddr == NULL && (addr = vmem_alloc(vmp, size, vmflag)) == NULL)
1132 		goto fail_vmem_alloc;
1133 
1134 	ASSERT(((uintptr_t)addr & (lpsize - 1)) == 0);
1135 
1136 	/* create all the pages */
1137 	for (pa = addr, i = 0; i < nlpages; i++, pa += lpsize) {
1138 		if ((pp = page_create_func(pa, lpsize, vmflag, pcarg)) == NULL)
1139 			goto fail_page_create;
1140 		page_list_concat(&pplist, &pp);
1141 	}
1142 
1143 	/* at this point we have all the resources to complete the request */
1144 	while ((rootpp = pplist) != NULL) {
1145 		for (i = 0; i < nbpages; i++) {
1146 			ASSERT(pplist != NULL);
1147 			pp = pplist;
1148 			page_sub(&pplist, pp);
1149 			ASSERT(page_iolock_assert(pp));
1150 			page_io_unlock(pp);
1151 			ppa[i] = pp;
1152 		}
1153 		/*
1154 		 * Load the locked entry. It's OK to preload the entry into the
1155 		 * TSB since we now support large mappings in the kernel TSB.
1156 		 */
1157 		hat_memload_array(kas.a_hat,
1158 		    (caddr_t)(uintptr_t)rootpp->p_offset, lpsize,
1159 		    ppa, (PROT_ALL & ~PROT_USER) | HAT_NOSYNC | attr,
1160 		    HAT_LOAD_LOCK);
1161 
1162 		for (--i; i >= 0; --i) {
1163 			ppa[i]->p_lckcnt = 1;
1164 			page_unlock(ppa[i]);
1165 		}
1166 	}
1167 
1168 	vmem_free(segkmem_ppa_arena, ppa, ppasize);
1169 	return (addr);
1170 
1171 fail_page_create:
1172 	while ((rootpp = pplist) != NULL) {
1173 		for (i = 0, pp = pplist; i < nbpages; i++, pp = pplist) {
1174 			ASSERT(pp != NULL);
1175 			page_sub(&pplist, pp);
1176 			ASSERT(page_iolock_assert(pp));
1177 			page_io_unlock(pp);
1178 		}
1179 		page_destroy_pages(rootpp);
1180 	}
1181 
1182 	if (inaddr == NULL)
1183 		vmem_free(vmp, addr, size);
1184 
1185 fail_vmem_alloc:
1186 	vmem_free(segkmem_ppa_arena, ppa, ppasize);
1187 
1188 fail_array_alloc:
1189 	page_unresv(npages);
1190 
1191 	return (NULL);
1192 }
1193 
1194 static void
1195 segkmem_free_one_lp(caddr_t addr, size_t size)
1196 {
1197 	page_t		*pp, *rootpp = NULL;
1198 	pgcnt_t 	pgs_left = btopr(size);
1199 
1200 	ASSERT(size == segkmem_lpsize);
1201 
1202 	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);
1203 
1204 	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
1205 		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
1206 		if (pp == NULL)
1207 			panic("segkmem_free_one_lp: page not found");
1208 		ASSERT(PAGE_EXCL(pp));
1209 		pp->p_lckcnt = 0;
1210 		if (rootpp == NULL)
1211 			rootpp = pp;
1212 	}
1213 	ASSERT(rootpp != NULL);
1214 	page_destroy_pages(rootpp);
1215 
1216 	/* page_unresv() is done by the caller */
1217 }
1218 
1219 /*
1220  * This function is called to import new spans into vmem arenas such as
1221  * kmem_default_arena and kmem_oversize_arena. It first tries to import
1222  * spans from the large page arena, kmem_lp_arena. In order to do this it
1223  * might have to "upgrade" the requested size to the kmem_lp_arena quantum.
1224  * If it cannot satisfy the upgraded request, it falls back to regular
1225  * segkmem_alloc(), which imports from the "*vmp" arena instead.
1226  */
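/*
 * For example, with the default 4MB kmem_lp quantum, a 5MB span request
 * from kmem_oversize_arena would be upgraded to
 * asize = P2ROUNDUP(5MB, 4MB) = 8MB before trying kmem_lp_arena.
 */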
1227 /*ARGSUSED*/
1228 void *
1229 segkmem_alloc_lp(vmem_t *vmp, size_t *sizep, size_t align, int vmflag)
1230 {
1231 	size_t size;
1232 	kthread_t *t = curthread;
1233 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1234 
1235 	ASSERT(sizep != NULL);
1236 
1237 	size = *sizep;
1238 
1239 	if (lpcb->lp_uselp && !(t->t_flag & T_PANIC) &&
1240 	    !(vmflag & SEGKMEM_SHARELOCKED)) {
1241 
1242 		size_t kmemlp_qnt = segkmem_kmemlp_quantum;
1243 		size_t asize = P2ROUNDUP(size, kmemlp_qnt);
1244 		void  *addr = NULL;
1245 		ulong_t *lpthrtp = &lpcb->lp_throttle;
1246 		ulong_t lpthrt = *lpthrtp;
1247 		int	dowakeup = 0;
1248 		int	doalloc = 1;
1249 
1250 		ASSERT(kmem_lp_arena != NULL);
1251 		ASSERT(asize >= size);
1252 
1253 		if (lpthrt != 0) {
1254 			/* try to update the throttle value */
1255 			lpthrt = atomic_add_long_nv(lpthrtp, 1);
1256 			if (lpthrt >= segkmem_lpthrottle_max) {
1257 				lpthrt = atomic_cas_ulong(lpthrtp, lpthrt,
1258 				    segkmem_lpthrottle_max / 4);
1259 			}
1260 
1261 			/*
1262 			 * When we get above the throttle start, do an
1263 			 * exponential backoff on trying large pages and reaping.
1264 			 */
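			/*
			 * Note: (x & (x - 1)) is zero exactly when x is a
			 * power of two, so once we are above the throttle
			 * start, a large page attempt proceeds only when the
			 * throttle count hits a power of two; every other
			 * attempt falls back to small pages below.
			 */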
1265 			if (lpthrt > segkmem_lpthrottle_start &&
1266 			    (lpthrt & (lpthrt - 1))) {
1267 				lpcb->allocs_throttled++;
1268 				lpthrt--;
1269 				if ((lpthrt & (lpthrt - 1)) == 0)
1270 					kmem_reap();
1271 				return (segkmem_alloc(vmp, size, vmflag));
1272 			}
1273 		}
1274 
1275 		if (!(vmflag & VM_NOSLEEP) &&
1276 		    segkmem_heaplp_quantum >= (8 * kmemlp_qnt) &&
1277 		    vmem_size(kmem_lp_arena, VMEM_FREE) <= kmemlp_qnt &&
1278 		    asize < (segkmem_heaplp_quantum - kmemlp_qnt)) {
1279 
1280 			/*
1281 			 * We are low on free memory in kmem_lp_arena, so
1282 			 * we let only one thread allocate a heap_lp
1283 			 * quantum-size chunk that everybody is going to
1284 			 * share.
1285 			 */
1286 			mutex_enter(&lpcb->lp_lock);
1287 
1288 			if (lpcb->lp_wait) {
1289 
1290 				/* we are not the first one - wait */
1291 				cv_wait(&lpcb->lp_cv, &lpcb->lp_lock);
1292 				if (vmem_size(kmem_lp_arena, VMEM_FREE) <
1293 				    kmemlp_qnt)  {
1294 					doalloc = 0;
1295 				}
1296 			} else if (vmem_size(kmem_lp_arena, VMEM_FREE) <=
1297 			    kmemlp_qnt) {
1298 
1299 				/*
1300 				 * we are the first one, make sure we import
1301 				 * a large page
1302 				 */
1303 				if (asize == kmemlp_qnt)
1304 					asize += kmemlp_qnt;
1305 				dowakeup = 1;
1306 				lpcb->lp_wait = 1;
1307 			}
1308 
1309 			mutex_exit(&lpcb->lp_lock);
1310 		}
1311 
1312 		/*
1313 		 * VM_ABORT flag prevents sleeps in vmem_xalloc when
1314 		 * large pages are not available. In that case this allocation
1315 		 * attempt will fail and we will retry allocation with small
1316 		 * pages. We also do not want to panic if this allocation fails
1317 		 * because we are going to retry.
1318 		 */
1319 		if (doalloc) {
1320 			addr = vmem_alloc(kmem_lp_arena, asize,
1321 			    (vmflag | VM_ABORT) & ~VM_PANIC);
1322 
1323 			if (dowakeup) {
1324 				mutex_enter(&lpcb->lp_lock);
1325 				ASSERT(lpcb->lp_wait != 0);
1326 				lpcb->lp_wait = 0;
1327 				cv_broadcast(&lpcb->lp_cv);
1328 				mutex_exit(&lpcb->lp_lock);
1329 			}
1330 		}
1331 
1332 		if (addr != NULL) {
1333 			*sizep = asize;
1334 			*lpthrtp = 0;
1335 			return (addr);
1336 		}
1337 
1338 		if (vmflag & VM_NOSLEEP)
1339 			lpcb->nosleep_allocs_failed++;
1340 		else
1341 			lpcb->sleep_allocs_failed++;
1342 		lpcb->alloc_bytes_failed += size;
1343 
1344 		/* if large page throttling has not started yet, start it */
1345 		if (segkmem_use_lpthrottle && lpthrt == 0) {
1346 			lpthrt = atomic_cas_ulong(lpthrtp, lpthrt, 1);
1347 		}
1348 	}
1349 	return (segkmem_alloc(vmp, size, vmflag));
1350 }
1351 
1352 void
1353 segkmem_free_lp(vmem_t *vmp, void *inaddr, size_t size)
1354 {
1355 	if (kmem_lp_arena == NULL || !IS_KMEM_VA_LARGEPAGE((caddr_t)inaddr)) {
1356 		segkmem_free(vmp, inaddr, size);
1357 	} else {
1358 		vmem_free(kmem_lp_arena, inaddr, size);
1359 	}
1360 }
1361 
1362 /*
1363  * segkmem_alloc_lpi() imports virtual memory from the large page heap
1364  * arena into the kmem_lp arena. In the process it maps the imported
1365  * segment with large pages.
1366  */
1367 static void *
1368 segkmem_alloc_lpi(vmem_t *vmp, size_t size, int vmflag)
1369 {
1370 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1371 	void  *addr;
1372 
1373 	ASSERT(size != 0);
1374 	ASSERT(vmp == heap_lp_arena);
1375 
1376 	/* do not allow the large page heap to grow beyond its limits */
1377 	if (vmem_size(vmp, VMEM_ALLOC) >= segkmem_kmemlp_max) {
1378 		lpcb->allocs_limited++;
1379 		return (NULL);
1380 	}
1381 
1382 	addr = segkmem_xalloc_lp(vmp, NULL, size, vmflag, 0,
1383 	    segkmem_page_create_large, NULL);
1384 	return (addr);
1385 }
1386 
1387 /*
1388  * segkmem_free_lpi() returns virtual memory back into the large page heap
1389  * arena from the kmem_lp arena. Before doing this it unmaps the segment
1390  * and frees the large pages used to map it.
1391  */
1392 static void
1393 segkmem_free_lpi(vmem_t *vmp, void *inaddr, size_t size)
1394 {
1395 	pgcnt_t		nlpages = size >> segkmem_lpshift;
1396 	size_t		lpsize = segkmem_lpsize;
1397 	caddr_t		addr = inaddr;
1398 	pgcnt_t 	npages = btopr(size);
1399 	int		i;
1400 
1401 	ASSERT(vmp == heap_lp_arena);
1402 	ASSERT(IS_KMEM_VA_LARGEPAGE(addr));
1403 	ASSERT(((uintptr_t)inaddr & (lpsize - 1)) == 0);
1404 
1405 	for (i = 0; i < nlpages; i++) {
1406 		segkmem_free_one_lp(addr, lpsize);
1407 		addr += lpsize;
1408 	}
1409 
1410 	page_unresv(npages);
1411 
1412 	vmem_free(vmp, inaddr, size);
1413 }
1414 
1415 /*
1416  * This function is called at system boot time by kmem_init right after the
1417  * /etc/system file has been read. Based on the hardware configuration and
1418  * /etc/system settings, it checks whether the system is going to use large
1419  * pages. The initialization necessary to actually start using large pages
1420  * happens later in the process, after segkmem_heap_lp_init() is called.
1421  */
1422 int
1423 segkmem_lpsetup()
1424 {
1425 	int use_large_pages = 0;
1426 
1427 #ifdef __sparc
1428 
1429 	size_t memtotal = physmem * PAGESIZE;
1430 
1431 	if (heap_lp_base == NULL) {
1432 		segkmem_lpsize = PAGESIZE;
1433 		return (0);
1434 	}
1435 
1436 	/* get the platform-dependent large page size for the kernel heap */
1437 	segkmem_lpsize = get_segkmem_lpsize(segkmem_lpsize);
1438 
1439 	if (segkmem_lpsize <= PAGESIZE) {
1440 		/*
1441 		 * put the virtual space reserved for the large page kernel
1442 		 * heap back into the regular heap
1443 		 */
1444 		vmem_xfree(heap_arena, heap_lp_base,
1445 		    heap_lp_end - heap_lp_base);
1446 		heap_lp_base = NULL;
1447 		heap_lp_end = NULL;
1448 		segkmem_lpsize = PAGESIZE;
1449 		return (0);
1450 	}
1451 
1452 	/* set heap_lp quantum if necessary */
1453 	if (segkmem_heaplp_quantum == 0 ||
1454 	    (segkmem_heaplp_quantum & (segkmem_heaplp_quantum - 1)) ||
1455 	    P2PHASE(segkmem_heaplp_quantum, segkmem_lpsize)) {
1456 		segkmem_heaplp_quantum = segkmem_lpsize;
1457 	}
1458 
1459 	/* set kmem_lp quantum if necessary */
1460 	if (segkmem_kmemlp_quantum == 0 ||
1461 	    (segkmem_kmemlp_quantum & (segkmem_kmemlp_quantum - 1)) ||
1462 	    segkmem_kmemlp_quantum > segkmem_heaplp_quantum) {
1463 		segkmem_kmemlp_quantum = segkmem_heaplp_quantum;
1464 	}
1465 
1466 	/* set total amount of memory allowed for large page kernel heap */
1467 	if (segkmem_kmemlp_max == 0) {
1468 		if (segkmem_kmemlp_pcnt == 0 || segkmem_kmemlp_pcnt > 100)
1469 			segkmem_kmemlp_pcnt = 12;
1470 		segkmem_kmemlp_max = (memtotal * segkmem_kmemlp_pcnt) / 100;
1471 	}
1472 	segkmem_kmemlp_max = P2ROUNDUP(segkmem_kmemlp_max,
1473 	    segkmem_heaplp_quantum);
1474 
1475 	/* fix the lp kmem preallocation request if necessary */
1476 	if (segkmem_kmemlp_min) {
1477 		segkmem_kmemlp_min = P2ROUNDUP(segkmem_kmemlp_min,
1478 		    segkmem_heaplp_quantum);
1479 		if (segkmem_kmemlp_min > segkmem_kmemlp_max)
1480 			segkmem_kmemlp_min = segkmem_kmemlp_max;
1481 	}
1482 
1483 	use_large_pages = 1;
1484 	segkmem_lpszc = page_szc(segkmem_lpsize);
1485 	segkmem_lpshift = page_get_shift(segkmem_lpszc);
1486 
1487 #endif
1488 	return (use_large_pages);
1489 }
1490 
1491 void
1492 segkmem_zio_init(void *zio_mem_base, size_t zio_mem_size)
1493 {
1494 	ASSERT(zio_mem_base != NULL);
1495 	ASSERT(zio_mem_size != 0);
1496 
1497 	zio_arena = vmem_create("zio", zio_mem_base, zio_mem_size, PAGESIZE,
1498 	    NULL, NULL, NULL, 0, VM_SLEEP);
1499 
1500 	zio_alloc_arena = vmem_create("zio_buf", NULL, 0, PAGESIZE,
1501 	    segkmem_zio_alloc, segkmem_zio_free, zio_arena, 0, VM_SLEEP);
1502 
1503 	ASSERT(zio_arena != NULL);
1504 	ASSERT(zio_alloc_arena != NULL);
1505 }
1506 
1507 #ifdef __sparc
1508 
1509 
1510 static void *
1511 segkmem_alloc_ppa(vmem_t *vmp, size_t size, int vmflag)
1512 {
1513 	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1514 	void   *addr;
1515 
1516 	if (ppaquantum <= PAGESIZE)
1517 		return (segkmem_alloc(vmp, size, vmflag));
1518 
1519 	ASSERT((size & (ppaquantum - 1)) == 0);
1520 
1521 	addr = vmem_xalloc(vmp, size, ppaquantum, 0, 0, NULL, NULL, vmflag);
1522 	if (addr != NULL && segkmem_xalloc(vmp, addr, size, vmflag, 0,
1523 	    segkmem_page_create, NULL) == NULL) {
1524 		vmem_xfree(vmp, addr, size);
1525 		addr = NULL;
1526 	}
1527 
1528 	return (addr);
1529 }
1530 
1531 static void
1532 segkmem_free_ppa(vmem_t *vmp, void *addr, size_t size)
1533 {
1534 	size_t ppaquantum = btopr(segkmem_lpsize) * sizeof (page_t *);
1535 
1536 	ASSERT(addr != NULL);
1537 
1538 	if (ppaquantum <= PAGESIZE) {
1539 		segkmem_free(vmp, addr, size);
1540 	} else {
1541 		segkmem_free(NULL, addr, size);
1542 		vmem_xfree(vmp, addr, size);
1543 	}
1544 }
1545 
1546 void
1547 segkmem_heap_lp_init()
1548 {
1549 	segkmem_lpcb_t *lpcb = &segkmem_lpcb;
1550 	size_t heap_lp_size = heap_lp_end - heap_lp_base;
1551 	size_t lpsize = segkmem_lpsize;
1552 	size_t ppaquantum;
1553 	void   *addr;
1554 
1555 	if (segkmem_lpsize <= PAGESIZE) {
1556 		ASSERT(heap_lp_base == NULL);
1557 		ASSERT(heap_lp_end == NULL);
1558 		return;
1559 	}
1560 
1561 	ASSERT(segkmem_heaplp_quantum >= lpsize);
1562 	ASSERT((segkmem_heaplp_quantum & (lpsize - 1)) == 0);
1563 	ASSERT(lpcb->lp_uselp == 0);
1564 	ASSERT(heap_lp_base != NULL);
1565 	ASSERT(heap_lp_end != NULL);
1566 	ASSERT(heap_lp_base < heap_lp_end);
1567 	ASSERT(heap_lp_arena == NULL);
1568 	ASSERT(((uintptr_t)heap_lp_base & (lpsize - 1)) == 0);
1569 	ASSERT(((uintptr_t)heap_lp_end & (lpsize - 1)) == 0);
1570 
1571 	/* create large page heap arena */
1572 	heap_lp_arena = vmem_create("heap_lp", heap_lp_base, heap_lp_size,
1573 	    segkmem_heaplp_quantum, NULL, NULL, NULL, 0, VM_SLEEP);
1574 
1575 	ASSERT(heap_lp_arena != NULL);
1576 
1577 	/* This arena caches memory already mapped by large pages */
1578 	kmem_lp_arena = vmem_create("kmem_lp", NULL, 0, segkmem_kmemlp_quantum,
1579 	    segkmem_alloc_lpi, segkmem_free_lpi, heap_lp_arena, 0, VM_SLEEP);
1580 
1581 	ASSERT(kmem_lp_arena != NULL);
1582 
1583 	mutex_init(&lpcb->lp_lock, NULL, MUTEX_DEFAULT, NULL);
1584 	cv_init(&lpcb->lp_cv, NULL, CV_DEFAULT, NULL);
1585 
1586 	/*
1587 	 * This arena is used for the array of page_t pointers necessary
1588 	 * to call hat_memload_array()
1589 	 */
1590 	ppaquantum = btopr(lpsize) * sizeof (page_t *);
1591 	segkmem_ppa_arena = vmem_create("segkmem_ppa", NULL, 0, ppaquantum,
1592 	    segkmem_alloc_ppa, segkmem_free_ppa, heap_arena, ppaquantum,
1593 	    VM_SLEEP);
1594 
1595 	ASSERT(segkmem_ppa_arena != NULL);
1596 
1597 	/* preallocate some memory for the lp kernel heap */
1598 	if (segkmem_kmemlp_min) {
1599 
1600 		ASSERT(P2PHASE(segkmem_kmemlp_min,
1601 		    segkmem_heaplp_quantum) == 0);
1602 
1603 		if ((addr = segkmem_alloc_lpi(heap_lp_arena,
1604 		    segkmem_kmemlp_min, VM_SLEEP)) != NULL) {
1605 
1606 			addr = vmem_add(kmem_lp_arena, addr,
1607 			    segkmem_kmemlp_min, VM_SLEEP);
1608 			ASSERT(addr != NULL);
1609 		}
1610 	}
1611 
1612 	lpcb->lp_uselp = 1;
1613 }
1614 
1615 #endif
1616