xref: /dragonfly/sys/vm/vm_zone.c (revision 4bab7bf3)
/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice immediately at the beginning of the file, without modification,
 *	this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *	John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID 0
#define	ZONE_ERROR_NOTFREE 1
#define	ZONE_ERROR_ALREADYFREE 2

#define ZONE_ROUNDING	32

#define	ZENTRY_FREE	0x12342378

long zone_burst = 128;

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
	globaldata_t gd = mycpu;
	vm_zpcpu_t *zpcpu;
	void *item;
	long n;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
	zpcpu = &z->zpcpu[gd->gd_cpuid];
retry:
	/*
	 * Avoid spinlock contention by allocating from a per-cpu queue
	 */
	if (zpcpu->zfreecnt > 0) {
		crit_enter_gd(gd);
		if (zpcpu->zfreecnt > 0) {
			item = zpcpu->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL,
				("zitems_pcpu unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = NULL;
#endif
			zpcpu->zitems = ((void **) item)[0];
			--zpcpu->zfreecnt;
			++zpcpu->znalloc;
			crit_exit_gd(gd);

			return item;
		}
		crit_exit_gd(gd);
	}

	/*
	 * Per-zone spinlock for the remainder.  Always load at least one
	 * item.
	 */
	spin_lock(&z->zspin);
	if (z->zfreecnt > z->zfreemin) {
		n = zone_burst;
		do {
			item = z->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL, ("zitems unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
#endif
			z->zitems = ((void **)item)[0];
			--z->zfreecnt;
			((void **)item)[0] = zpcpu->zitems;
			zpcpu->zitems = item;
			++zpcpu->zfreecnt;
		} while (--n > 0 && z->zfreecnt > z->zfreemin);
		spin_unlock(&z->zspin);
		goto retry;
	} else {
		spin_unlock(&z->zspin);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}

/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
	globaldata_t gd = mycpu;
	vm_zpcpu_t *zpcpu;
	void *tail_item;
	long count;
	long zmax;

	zpcpu = &z->zpcpu[gd->gd_cpuid];

	/*
	 * Avoid spinlock contention by freeing into a per-cpu queue
	 */
	zmax = z->zmax_pcpu;
	if (zmax < 1024)
		zmax = 1024;

	/*
	 * Add to pcpu cache
	 */
	crit_enter_gd(gd);
	((void **)item)[0] = zpcpu->zitems;
#ifdef INVARIANTS
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
	zpcpu->zitems = item;
	++zpcpu->zfreecnt;

	if (zpcpu->zfreecnt < zmax) {
		crit_exit_gd(gd);
		return;
	}

	/*
	 * Hysteresis: move zmax items (recalculated below) back to the pool.
	 */
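	/*
	 * A worked example (illustrative numbers only, using the defaults
	 * visible in this file): with zmax_pcpu clamped to a minimum of
	 * 1024 and zone_burst = 128, the pcpu cache fills to 1024 items
	 * before we get here, and then min(1024 / 2, 128) = 128 items
	 * are handed back to the depot under the zone spinlock.
	 */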
	zmax = zmax / 2;
	if (zmax > zone_burst)
		zmax = zone_burst;
	tail_item = item;
	count = 1;

	while (count < zmax) {
		tail_item = ((void **)tail_item)[0];
		++count;
	}
	zpcpu->zitems = ((void **)tail_item)[0];
	zpcpu->zfreecnt -= count;

	/*
	 * Per-zone spinlock for the remainder.
	 *
	 * Also implement hysteresis by freeing a number of pcpu
	 * entries.
	 */
	spin_lock(&z->zspin);
	((void **)tail_item)[0] = z->zitems;
	z->zitems = item;
	z->zfreecnt += count;
	spin_unlock(&z->zspin);

	crit_exit_gd(gd);
}

/*
 * This file implements a very simple zone allocator.  It is used
 * in lieu of the malloc allocator where it is needed or more efficient.
 *
 * Note that the initial implementation of this allocator had coloring,
 * which yielded no improvement (actually a performance degradation).
 *
 * Note also that the zones are type stable.  The only caveat is that
 * the first two longwords of a data structure may be modified between
 * allocations.  Any data that must remain stable between allocations
 * must reside after the first two longwords.
 *
 * zinitna(), zinit() and zbootinit() are the initialization routines.
 * zalloc() and zfree() are the allocation/free routines.
 */

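/*
 * Illustrative usage sketch only; it is not referenced by this file and
 * the "xfoo" names are hypothetical.  A typical consumer creates a zone
 * once at initialization time and then allocates and frees fixed-size,
 * type-stable items from it.
 */
#if 0
struct xfoo {
	struct xfoo	*xf_next;	/* first two longwords are clobbered */
	void		*xf_reserved;	/* while the item sits on a freelist */
	int		 xf_data;	/* data past them is type-stable */
};

static vm_zone_t xfoo_zone;

static void
xfoo_zone_init(void)
{
	/* unlimited, non-interrupt zone; zinit() may return NULL */
	xfoo_zone = zinit("XFOO", sizeof(struct xfoo), 0, 0);
	if (xfoo_zone == NULL)
		panic("xfoo_zone_init: cannot create zone");
}

static struct xfoo *
xfoo_alloc(void)
{
	/* without ZONE_PANICFAIL the caller must check for NULL */
	return (zalloc(xfoo_zone));
}

static void
xfoo_free(struct xfoo *xf)
{
	zfree(xfoo_zone, xf);
}
#endif
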
LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static vm_pindex_t zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone was previously created by the zone boot code, initialize
 * various parts of the zone structure.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), the kernel virtual space is allocated up front and pages
 * are filled in only as needed.
 *
 * Arguments:
 * z		pointer to zone structure.
 * name		name of zone.
 * size		size of zone entries.
 * nentries	number of zone entries allocated (ZONE_INTERRUPT only).
 * flags	ZONE_INTERRUPT -- items can be allocated at interrupt time.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The amount of allocatable memory is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, char *name, size_t size, long nentries, uint32_t flags)
{
	size_t totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 * 	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = roundup2(size, ZONE_ROUNDING);
		spin_init(&z->zspin, "zinitna");
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->zitems = NULL;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);

		bzero(z->zpcpu, sizeof(z->zpcpu));
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page((size_t)z->zsize * nentries);
		atomic_add_long(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize,
					      VM_SUBSYS_ZALLOC);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
				VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
		z->zmax += nentries;
		z->zmax_pcpu = z->zmax / ncpus / 16;

		/*
		 * Set reasonable pcpu cache bounds.  Low-memory systems
		 * might try to cache too little, large-memory systems
		 * might try to cache more than necessary.
		 *
		 * In particular, pvzone can wind up being excessive and
		 * waste memory unnecessarily.
		 */
		if (z->zmax_pcpu < 1024)
			z->zmax_pcpu = 1024;
		if (z->zmax_pcpu * z->zsize > 16*1024*1024)
			z->zmax_pcpu = 16*1024*1024 / z->zsize;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
		z->zmax_pcpu = 8192;
	}


	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;

	/*
	 * Reduce kernel_map spam by allocating in chunks.
	 */
	z->zalloc = ZONE_MAXPGLOAD;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		if (buf)
			zfree(z, buf);
	}

	return 1;
}
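
/*
 * Illustrative sketch only (hypothetical "xpv" names): an interrupt-safe
 * zone is created with zinitna() on a caller-supplied zone structure so
 * that KVA for at most nentries items is reserved up front and zalloc()
 * never has to touch kernel_map at allocation time (cf. the pvzone note
 * above).
 */
#if 0
struct xpv_entry {
	struct xpv_entry *xpv_next;
	void		 *xpv_reserved;
	vm_offset_t	  xpv_va;
};

static struct vm_zone	xpvzone_store;
static vm_zone_t	xpvzone = &xpvzone_store;

static void
xpvzone_init(long nentries)
{
	if (zinitna(xpvzone, "XPV ENTRY", sizeof(struct xpv_entry),
		    nentries, ZONE_INTERRUPT) == 0)
		panic("xpvzone_init: unable to reserve zone KVA");
}
#endif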

/*
 * Same as zinitna(), except the zone data structure is allocated
 * automatically by kmalloc().  This routine should normally be used,
 * except in certain tricky startup conditions in the VM system -- then
 * zbootinit() and zinitna() can be used.  zinit() is the standard zone
 * initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, size_t size, long nentries, uint32_t flags)
{
	vm_zone_t z;

	z = (vm_zone_t) kmalloc(sizeof (struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, name, size, nentries, flags & ~ZONE_DESTROYABLE) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, size_t size, void *item, long nitems)
{
	long i;

	spin_init(&z->zspin, "zbootinit");
	bzero(z->zpcpu, sizeof(z->zpcpu));
	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;

	bzero(item, (size_t)nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}

/*
 * Release all resources owned by a zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
	vm_pindex_t i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	KKASSERT((z->zflags & ZONE_INTERRUPT) == 0);
	for (i = 0; i < z->zkmcur; i++) {
		kmem_free(&kernel_map, z->zkmvec[i],
			  (size_t)z->zalloc * PAGE_SIZE);
		atomic_subtract_long(&zone_kern_pages, z->zalloc);
	}
	if (z->zkmvec != NULL)
		kfree(z->zkmvec, M_ZONE);

	spin_uninit(&z->zspin);
	kfree(z, M_ZONE);
}


/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * This function may return NULL.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
	vm_page_t pgs[ZONE_MAXPGLOAD];
	vm_page_t m;
	long nitems;
	long savezpc;
	size_t nbytes;
	size_t noffset;
	void *item;
	vm_pindex_t npages;
	vm_pindex_t nalloc;
	vm_pindex_t i;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 *
		 * First allocate as many pages as we can, stopping at
		 * our limit or if the page allocation fails.  Try to
		 * avoid exhausting the interrupt free minimum by backing
		 * off to normal page allocations after a certain point.
		 */
		for (i = 0; i < ZONE_MAXPGLOAD && i < z->zalloc; ++i) {
			if (i < 4) {
				m = vm_page_alloc(NULL,
						  mycpu->gd_rand_incr++,
						  z->zallocflag);
			} else {
				m = vm_page_alloc(NULL,
						  mycpu->gd_rand_incr++,
						  VM_ALLOC_NORMAL |
						  VM_ALLOC_SYSTEM);
			}
			if (m == NULL)
				break;
			pgs[i] = m;
		}
		nalloc = i;

		/*
		 * Account for the pages.
		 *
		 * NOTE! Do not allow overlap with a prior page as it
		 *	 may still be undergoing allocation on another
		 *	 cpu.
		 */
		spin_lock(&z->zspin);
		noffset = (size_t)z->zpagecount * PAGE_SIZE;
		/* noffset -= noffset % z->zsize; */
		savezpc = z->zpagecount;
		if (z->zpagecount + nalloc > z->zpagemax)
			z->zpagecount = z->zpagemax;
		else
			z->zpagecount += nalloc;
		item = (char *)z->zkva + noffset;
		npages = z->zpagecount - savezpc;
		nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
			 z->zsize;
		atomic_add_long(&zone_kmem_pages, npages);
		spin_unlock(&z->zspin);

		/*
		 * Enter the pages into the reserved KVA space.
		 */
		for (i = 0; i < npages; ++i) {
			vm_offset_t zkva;

			m = pgs[i];
			KKASSERT(m->queue == PQ_NONE);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_wire(m);
			vm_page_wakeup(m);

			zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
			bzero((void *)zkva, PAGE_SIZE);
		}
		for (i = npages; i < nalloc; ++i) {
			m = pgs[i];
			vm_page_free(m);
		}
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * via vm_map_entry_reserve_cpu_init().
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_long(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_long(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
						z->zkmmax==0 ? 1 : z->zkmmax*2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	/*
	 * Enter any new items into the pool, reserving one, or get the
	 * item from the existing pool.
	 */
	spin_lock(&z->zspin);
	z->ztotal += nitems;

	/*
	 * The zone code may need to allocate kernel memory, which can
	 * recurse into zget() infinitely if we do not handle it properly.
	 * We deal with this by directly repopulating the per-cpu
	 * vm_map_entry cache.
	 */
	if (nitems > 1 && (z->zflags & ZONE_SPECIAL)) {
		struct globaldata *gd = mycpu;
		vm_map_entry_t entry;

		/*
		 * Make sure we have enough structures in gd_vme_base to handle
		 * the reservation request.
		 *
		 * The critical section protects access to the per-cpu gd.
		 */
		crit_enter();
		while (gd->gd_vme_avail < 2 && nitems > 1) {
			entry = item;
			entry->next = gd->gd_vme_base;
			gd->gd_vme_base = entry;
			atomic_add_int(&gd->gd_vme_avail, 1);
			item = (uint8_t *)item + z->zsize;
			--nitems;
		}
		crit_exit();
	}

	if (nitems != 0) {
		/*
		 * Enter the new items into the pool, saving one for
		 * immediate allocation.
		 */
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		++z->znalloc;
	} else if (z->zfreecnt > 0) {
		/*
		 * Get an item from the existing pool.
		 */
		item = z->zitems;
		z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **) item)[1] = NULL;
#endif
		--z->zfreecnt;
		++z->znalloc;
	} else {
		/*
		 * No items available.
		 */
		item = NULL;
	}
	spin_unlock(&z->zspin);

	return item;
}

/*
 * No requirements.
 */
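/*
 * The handler below writes one header line followed by one line per zone
 * in the layout shown here (zone name and numbers are illustrative only):
 *
 *	ITEM            SIZE     LIMIT    USED    FREE  REQUESTS
 *	FOOZONE:       000128, 00000000, 001024, 000256, 00008192
 */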
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];
	int error = 0;

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		size_t i;
		size_t len;
		int offset;
		long freecnt;
		long znalloc;
		int n;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for(i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}
		freecnt = curzone->zfreecnt;
		znalloc = curzone->znalloc;
		for (n = 0; n < ncpus; ++n) {
			freecnt += curzone->zpcpu[n].zfreecnt;
			znalloc += curzone->zpcpu[n].znalloc;
		}

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			"%s %6.6lu, %8.8lu, %6.6lu, %6.6lu, %8.8lu\n",
			tmpname, curzone->zsize, curzone->zmax,
			(curzone->ztotal - freecnt),
			freecnt, znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}

#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD, \
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_pages,
	CTLFLAG_RD, &zone_kmem_pages, 0, "Number of interrupt safe pages allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_burst,
	CTLFLAG_RW, &zone_burst, 0, "Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
	CTLFLAG_RD, &zone_kmem_kvaspace, 0, "KVA space allocated by zone");
SYSCTL_LONG(_vm, OID_AUTO, zone_kern_pages,
	CTLFLAG_RD, &zone_kern_pages, 0, "Number of non-interrupt safe pages allocated by zone");