/*
 * (MPSAFE)
 *
 * Copyright (c) 1997, 1998 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice immediately at the beginning of the file, without modification,
 *	this list of conditions, and the following disclaimer.
 * 2. Absolutely no warranty of function or purpose is made by the author
 *	John S. Dyson.
 *
 * $FreeBSD: src/sys/vm/vm_zone.c,v 1.30.2.6 2002/10/10 19:50:16 dillon Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

static MALLOC_DEFINE(M_ZONE, "ZONE", "Zone header");

#define	ZONE_ERROR_INVALID 0
#define	ZONE_ERROR_NOTFREE 1
#define	ZONE_ERROR_ALREADYFREE 2

#define ZONE_ROUNDING	32

#define	ZENTRY_FREE	0x12342378

int zone_burst = 32;

static void *zget(vm_zone_t z);

/*
 * Return an item from the specified zone.  This function is non-blocking for
 * ZONE_INTERRUPT zones.
 *
 * No requirements.
 */
void *
zalloc(vm_zone_t z)
{
	globaldata_t gd = mycpu;
	void *item;
	int n;

#ifdef INVARIANTS
	if (z == NULL)
		zerror(ZONE_ERROR_INVALID);
#endif
retry:
	/*
	 * Avoid spinlock contention by allocating from a per-cpu queue
	 */
	if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
		crit_enter_gd(gd);
		if (z->zfreecnt_pcpu[gd->gd_cpuid] > 0) {
			item = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
			KASSERT(item != NULL,
				("zitems_pcpu unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
			((void **)item)[1] = NULL;
#endif
			z->zitems_pcpu[gd->gd_cpuid] = ((void **)item)[0];
			--z->zfreecnt_pcpu[gd->gd_cpuid];
			z->znalloc++;
			crit_exit_gd(gd);
			return item;
		}
		crit_exit_gd(gd);
	}

	/*
	 * Per-zone spinlock for the remainder.  Always load at least one
	 * item.
	 */
	spin_lock(&z->zlock);
	if (z->zfreecnt > z->zfreemin) {
		n = zone_burst;
		do {
			item = z->zitems;
#ifdef INVARIANTS
			KASSERT(item != NULL, ("zitems unexpectedly NULL"));
			if (((void **)item)[1] != (void *)ZENTRY_FREE)
				zerror(ZONE_ERROR_NOTFREE);
#endif
			z->zitems = ((void **)item)[0];
			z->zfreecnt--;
			((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
			z->zitems_pcpu[gd->gd_cpuid] = item;
			++z->zfreecnt_pcpu[gd->gd_cpuid];
		} while (--n > 0 && z->zfreecnt > z->zfreemin);
		spin_unlock(&z->zlock);
		goto retry;
	} else {
		spin_unlock(&z->zlock);
		item = zget(z);
		/*
		 * PANICFAIL allows the caller to assume that the zalloc()
		 * will always succeed.  If it doesn't, we panic here.
		 */
		if (item == NULL && (z->zflags & ZONE_PANICFAIL))
			panic("zalloc(%s) failed", z->zname);
	}
	return item;
}

/*
 * Free an item to the specified zone.
 *
 * No requirements.
 */
void
zfree(vm_zone_t z, void *item)
{
	globaldata_t gd = mycpu;
	void *tail_item;
	int count;
	int zmax;

	/*
	 * Avoid spinlock contention by freeing into a per-cpu queue
	 */
	if ((zmax = z->zmax) != 0)
		zmax = zmax / ncpus / 16;
	if (zmax < 64)
		zmax = 64;
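
	/*
	 * Illustrative sizing note (not part of the original source):
	 * with z->zmax == 32768 and ncpus == 16 the per-cpu cache is
	 * capped at 32768 / 16 / 16 = 128 items; small zones and
	 * unlimited zones (zmax == 0) fall back to the 64-item floor.
	 */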

	/*
	 * Add to pcpu cache
	 */
	crit_enter_gd(gd);
	((void **)item)[0] = z->zitems_pcpu[gd->gd_cpuid];
#ifdef INVARIANTS
	if (((void **)item)[1] == (void *)ZENTRY_FREE)
		zerror(ZONE_ERROR_ALREADYFREE);
	((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
	z->zitems_pcpu[gd->gd_cpuid] = item;
	++z->zfreecnt_pcpu[gd->gd_cpuid];

	if (z->zfreecnt_pcpu[gd->gd_cpuid] < zmax) {
		crit_exit_gd(gd);
		return;
	}

	/*
	 * Hysteresis: move (zmax) items (calculated below) to the pool.
	 */
	zmax = zmax / 2;
	if (zmax > zone_burst)
		zmax = zone_burst;
	tail_item = item;
	count = 1;

	while (count < zmax) {
		tail_item = ((void **)tail_item)[0];
		++count;
	}

	z->zitems_pcpu[gd->gd_cpuid] = ((void **)tail_item)[0];
	z->zfreecnt_pcpu[gd->gd_cpuid] -= count;

	/*
	 * Per-zone spinlock for the remainder.
	 *
	 * Also implement hysteresis by freeing a number of pcpu
	 * entries.
	 */
	spin_lock(&z->zlock);
	((void **)tail_item)[0] = z->zitems;
	z->zitems = item;
	z->zfreecnt += count;

	spin_unlock(&z->zlock);
	crit_exit_gd(gd);
}

/*
 * This file implements a very simple zone allocator.  It is used
 * in lieu of the malloc allocator where it is needed or more optimal.
 *
 * Note that the initial implementation of this had coloring, and
 * absolutely no improvement (actually a performance degradation)
 * resulted from it.
 *
 * Note also that the zones are type stable.  The only restriction is
 * that the first two longwords of a data structure may be changed
 * between allocations.  Any data that must be stable between allocations
 * must reside after the first two longwords.
 *
 * zinitna, zinit, zbootinit are the initialization routines.
 * zalloc, zfree, are the allocation/free routines.
 */
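
/*
 * Illustrative sketch (not part of the original source): while an item
 * sits on a free list the allocator overlays its first two longwords,
 * which is why those words may not hold data that has to survive across
 * allocations.  Conceptually a free item looks like:
 *
 *	struct free_item_overlay {
 *		void	*znext;		-- free-list link, ((void **)item)[0]
 *		void	*zmagic;	-- ZENTRY_FREE under INVARIANTS,
 *					   ((void **)item)[1]
 *	};
 *
 * "free_item_overlay" is a hypothetical name used only for this
 * illustration; the code manipulates the two words directly through
 * void ** casts.
 */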

LIST_HEAD(zlist, vm_zone) zlist = LIST_HEAD_INITIALIZER(zlist);
static int sysctl_vm_zone(SYSCTL_HANDLER_ARGS);
static int zone_kmem_pages, zone_kern_pages;
static long zone_kmem_kvaspace;

/*
 * Create a zone, but don't allocate the zone structure.  If the
 * zone had been previously created by the zone boot code, initialize
 * various parts of the zone code.
 *
 * If waits are not allowed during allocation (e.g. during interrupt
 * code), allocate the kernel virtual space a priori and allocate
 * pages only as needed.
 *
 * Arguments:
 * z		pointer to zone structure.
 * obj		pointer to VM object (opt).
 * name		name of zone.
 * size		size of zone entries.
 * nentries	number of zone entries allocated (ZONE_INTERRUPT only).
 * flags	ZONE_INTERRUPT -- items can be allocated at interrupt time.
 * zalloc	number of pages allocated when memory is needed.
 *
 * Note that when using ZONE_INTERRUPT, the size of the zone is limited
 * by the nentries argument.  The amount of allocatable memory is
 * unlimited if ZONE_INTERRUPT is not set.
 *
 * No requirements.
 */
int
zinitna(vm_zone_t z, vm_object_t obj, char *name, int size,
	int nentries, int flags)
{
	size_t totsize;

	/*
	 * Only zones created with zinit() are destroyable.
	 */
	if (z->zflags & ZONE_DESTROYABLE)
		panic("zinitna: can't create destroyable zone");

	/*
	 * NOTE: We can only adjust zsize if we previously did not
	 *	 use zbootinit().
	 */
	if ((z->zflags & ZONE_BOOT) == 0) {
		z->zsize = roundup2(size, ZONE_ROUNDING);
		spin_init(&z->zlock, "zinitna");
		z->zfreecnt = 0;
		z->ztotal = 0;
		z->zmax = 0;
		z->zname = name;
		z->znalloc = 0;
		z->zitems = NULL;

		lwkt_gettoken(&vm_token);
		LIST_INSERT_HEAD(&zlist, z, zlink);
		lwkt_reltoken(&vm_token);

		bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
		bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));
	}

	z->zkmvec = NULL;
	z->zkmcur = z->zkmmax = 0;
	z->zflags |= flags;

	/*
	 * If we cannot wait, allocate KVA space up front, and we will fill
	 * in pages as needed.  This is particularly required when creating
	 * an allocation space for map entries in kernel_map, because we
	 * do not want to go into a recursion deadlock with
	 * vm_map_entry_reserve().
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		totsize = round_page((size_t)z->zsize * nentries);
		atomic_add_long(&zone_kmem_kvaspace, totsize);

		z->zkva = kmem_alloc_pageable(&kernel_map, totsize,
					      VM_SUBSYS_ZALLOC);
		if (z->zkva == 0) {
			LIST_REMOVE(z, zlink);
			return 0;
		}

		z->zpagemax = totsize / PAGE_SIZE;
		if (obj == NULL) {
			z->zobj = vm_object_allocate(OBJT_DEFAULT, z->zpagemax);
		} else {
			z->zobj = obj;
			_vm_object_allocate(OBJT_DEFAULT, z->zpagemax, obj);
			vm_object_drop(obj);
		}
		z->zallocflag = VM_ALLOC_SYSTEM | VM_ALLOC_INTERRUPT |
				VM_ALLOC_NORMAL | VM_ALLOC_RETRY;
		z->zmax += nentries;
	} else {
		z->zallocflag = VM_ALLOC_NORMAL | VM_ALLOC_SYSTEM;
		z->zmax = 0;
	}

	if (z->zsize > PAGE_SIZE)
		z->zfreemin = 1;
	else
		z->zfreemin = PAGE_SIZE / z->zsize;

	z->zpagecount = 0;

	/*
	 * Reduce kernel_map spam by allocating in chunks of 4 pages.
	 */
	z->zalloc = 4;

	/*
	 * Populate the interrupt zone at creation time rather than
	 * on first allocation, as this is a potentially long operation.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		void *buf;

		buf = zget(z);
		zfree(z, buf);
	}

	return 1;
}

/*
 * Same as zinitna(), except the zone data structure is allocated
 * automatically by kmalloc().  This routine should normally be used,
 * except in certain tricky startup conditions in the VM system -- in
 * those cases zbootinit and zinitna can be used.  zinit is the standard
 * zone initialization call.
 *
 * No requirements.
 */
vm_zone_t
zinit(char *name, int size, int nentries, int flags)
{
	vm_zone_t z;

	z = (vm_zone_t)kmalloc(sizeof(struct vm_zone), M_ZONE, M_NOWAIT);
	if (z == NULL)
		return NULL;

	z->zflags = 0;
	if (zinitna(z, NULL, name, size, nentries,
	            flags & ~ZONE_DESTROYABLE) == 0) {
		kfree(z, M_ZONE);
		return NULL;
	}

	if (flags & ZONE_DESTROYABLE)
		z->zflags |= ZONE_DESTROYABLE;

	return z;
}

/*
 * Initialize a zone before the system is fully up.  This routine should
 * only be called before full VM startup.
 *
 * Called from the low level boot code only.
 */
void
zbootinit(vm_zone_t z, char *name, int size, void *item, int nitems)
{
	int i;

	bzero(z->zitems_pcpu, sizeof(z->zitems_pcpu));
	bzero(z->zfreecnt_pcpu, sizeof(z->zfreecnt_pcpu));

	z->zname = name;
	z->zsize = size;
	z->zpagemax = 0;
	z->zobj = NULL;
	z->zflags = ZONE_BOOT;
	z->zfreemin = 0;
	z->zallocflag = 0;
	z->zpagecount = 0;
	z->zalloc = 0;
	z->znalloc = 0;
	spin_init(&z->zlock, "zbootinit");

	bzero(item, (size_t)nitems * z->zsize);
	z->zitems = NULL;
	for (i = 0; i < nitems; i++) {
		((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
		((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
		z->zitems = item;
		item = (uint8_t *)item + z->zsize;
	}
	z->zfreecnt = nitems;
	z->zmax = nitems;
	z->ztotal = nitems;

	lwkt_gettoken(&vm_token);
	LIST_INSERT_HEAD(&zlist, z, zlink);
	lwkt_reltoken(&vm_token);
}

/*
 * Release all resources owned by zone created with zinit().
 *
 * No requirements.
 */
void
zdestroy(vm_zone_t z)
{
	vm_page_t m;
	int i;

	if (z == NULL)
		panic("zdestroy: null zone");
	if ((z->zflags & ZONE_DESTROYABLE) == 0)
		panic("zdestroy: undestroyable zone");

	lwkt_gettoken(&vm_token);
	LIST_REMOVE(z, zlink);
	lwkt_reltoken(&vm_token);

	/*
	 * Release virtual mappings, physical memory and update sysctl stats.
	 */
	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Pages mapped via pmap_kenter() must be removed from the
		 * kernel_pmap() before calling kmem_free() to avoid issues
		 * with kernel_pmap.pm_stats.resident_count.
		 */
		pmap_qremove(z->zkva, z->zpagemax);
		vm_object_hold(z->zobj);
		for (i = 0; i < z->zpagecount; ++i) {
			m = vm_page_lookup_busy_wait(z->zobj, i, TRUE, "vmzd");
			vm_page_unwire(m, 0);
			vm_page_free(m);
		}

		/*
		 * Free the mapping.
		 */
		kmem_free(&kernel_map, z->zkva,
			  (size_t)z->zpagemax * PAGE_SIZE);
		atomic_subtract_long(&zone_kmem_kvaspace,
				     (size_t)z->zpagemax * PAGE_SIZE);

		/*
		 * Free the backing object and physical pages.
		 */
		vm_object_deallocate(z->zobj);
		vm_object_drop(z->zobj);
		atomic_subtract_int(&zone_kmem_pages, z->zpagecount);
	} else {
		for (i = 0; i < z->zkmcur; i++) {
			kmem_free(&kernel_map, z->zkmvec[i],
				  (size_t)z->zalloc * PAGE_SIZE);
			atomic_subtract_int(&zone_kern_pages, z->zalloc);
		}
		if (z->zkmvec != NULL)
			kfree(z->zkmvec, M_ZONE);
	}

	spin_uninit(&z->zlock);
	kfree(z, M_ZONE);
}

/*
 * void *zalloc(vm_zone_t zone) --
 *	Returns an item from a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 *
 * void zfree(vm_zone_t zone, void *item) --
 *	Frees an item back to a specified zone.  May not be called from a
 *	FAST interrupt or IPI function.
 */
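
/*
 * Minimal usage sketch (illustrative only, not part of the original
 * source).  The "mydata" structure and "mydata_zone" names are
 * hypothetical:
 *
 *	static vm_zone_t mydata_zone;
 *
 *	mydata_zone = zinit("mydata", sizeof(struct mydata), 0, 0);
 *	if (mydata_zone == NULL)
 *		panic("cannot create mydata zone");
 *
 *	struct mydata *md = zalloc(mydata_zone);
 *	if (md != NULL) {
 *		... use md ...
 *		zfree(mydata_zone, md);
 *	}
 *
 * A zone created with ZONE_PANICFAIL lets callers skip the NULL check,
 * and one created with ZONE_DESTROYABLE may later be released with
 * zdestroy().
 */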

/*
 * Internal zone routine.  Not to be called from external (non vm_zone) code.
 *
 * No requirements.
 */
static void *
zget(vm_zone_t z)
{
	int i;
	vm_page_t m;
	int nitems;
	int npages;
	int savezpc;
	size_t nbytes;
	size_t noffset;
	void *item;

	if (z == NULL)
		panic("zget: null zone");

	if (z->zflags & ZONE_INTERRUPT) {
		/*
		 * Interrupt zones do not mess with the kernel_map, they
		 * simply populate an existing mapping.
		 *
		 * First reserve the required space.
		 */
		vm_object_hold(z->zobj);
		noffset = (size_t)z->zpagecount * PAGE_SIZE;
		noffset -= noffset % z->zsize;
		savezpc = z->zpagecount;
		if (z->zpagecount + z->zalloc > z->zpagemax)
			z->zpagecount = z->zpagemax;
		else
			z->zpagecount += z->zalloc;
		item = (char *)z->zkva + noffset;
		npages = z->zpagecount - savezpc;
		nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) /
			 z->zsize;
		atomic_add_int(&zone_kmem_pages, npages);

		/*
		 * Now allocate the pages.  Note that we can block in the
		 * loop, so we've already done all the necessary calculations
		 * and reservations above.
		 */
		for (i = 0; i < npages; ++i) {
			vm_offset_t zkva;

			m = vm_page_alloc(z->zobj, savezpc + i, z->zallocflag);
			KKASSERT(m != NULL);
			/* note: z might be modified due to blocking */

			KKASSERT(m->queue == PQ_NONE);
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_wire(m);
			vm_page_wakeup(m);

			zkva = z->zkva + (size_t)(savezpc + i) * PAGE_SIZE;
			pmap_kenter(zkva, VM_PAGE_TO_PHYS(m));
			bzero((void *)zkva, PAGE_SIZE);
		}
		vm_object_drop(z->zobj);
	} else if (z->zflags & ZONE_SPECIAL) {
		/*
		 * The special zone is the one used for vm_map_entry_t's.
		 * We have to avoid an infinite recursion in
		 * vm_map_entry_reserve() by using vm_map_entry_kreserve()
		 * instead.  The map entries are pre-reserved by the kernel
		 * by vm_map_entry_reserve_cpu_init().
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, KM_KRESERVE);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_int(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	} else {
		/*
		 * Otherwise allocate KVA from the kernel_map.
		 */
		nbytes = (size_t)z->zalloc * PAGE_SIZE;

		item = (void *)kmem_alloc3(&kernel_map, nbytes,
					   VM_SUBSYS_ZALLOC, 0);

		/* note: z might be modified due to blocking */
		if (item != NULL) {
			atomic_add_int(&zone_kern_pages, z->zalloc);
			bzero(item, nbytes);

			if (z->zflags & ZONE_DESTROYABLE) {
				if (z->zkmcur == z->zkmmax) {
					z->zkmmax =
						z->zkmmax == 0 ? 1 : z->zkmmax * 2;
					z->zkmvec = krealloc(z->zkmvec,
					    z->zkmmax * sizeof(z->zkmvec[0]),
					    M_ZONE, M_WAITOK);
				}
				z->zkmvec[z->zkmcur++] = (vm_offset_t)item;
			}
		} else {
			nbytes = 0;
		}
		nitems = nbytes / z->zsize;
	}

	spin_lock(&z->zlock);
	z->ztotal += nitems;
	/*
	 * Save one for immediate allocation
	 */
	if (nitems != 0) {
		nitems -= 1;
		for (i = 0; i < nitems; i++) {
			((void **)item)[0] = z->zitems;
#ifdef INVARIANTS
			((void **)item)[1] = (void *)ZENTRY_FREE;
#endif
			z->zitems = item;
			item = (uint8_t *)item + z->zsize;
		}
		z->zfreecnt += nitems;
		z->znalloc++;
	} else if (z->zfreecnt > 0) {
		item = z->zitems;
		z->zitems = ((void **)item)[0];
#ifdef INVARIANTS
		if (((void **)item)[1] != (void *)ZENTRY_FREE)
			zerror(ZONE_ERROR_NOTFREE);
		((void **)item)[1] = NULL;
#endif
		z->zfreecnt--;
		z->znalloc++;
	} else {
		item = NULL;
	}
	spin_unlock(&z->zlock);

	/*
	 * A special zone may have used a kernel-reserved vm_map_entry.  If
	 * so we have to be sure to recover our reserve so we don't run out.
	 * We will panic if we run out.
	 */
	if (z->zflags & ZONE_SPECIAL)
		vm_map_entry_reserve(0);

	return item;
}

/*
 * No requirements.
 */
static int
sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	vm_zone_t curzone;
	char tmpbuf[128];
	char tmpname[14];

	ksnprintf(tmpbuf, sizeof(tmpbuf),
	    "\nITEM            SIZE     LIMIT    USED    FREE  REQUESTS\n");
	error = SYSCTL_OUT(req, tmpbuf, strlen(tmpbuf));
	if (error)
		return (error);

	lwkt_gettoken(&vm_token);
	LIST_FOREACH(curzone, &zlist, zlink) {
		int i;
		int n;
		int len;
		int offset;
		int freecnt;

		len = strlen(curzone->zname);
		if (len >= (sizeof(tmpname) - 1))
			len = (sizeof(tmpname) - 1);
		for (i = 0; i < sizeof(tmpname) - 1; i++)
			tmpname[i] = ' ';
		tmpname[i] = 0;
		memcpy(tmpname, curzone->zname, len);
		tmpname[len] = ':';
		offset = 0;
		if (curzone == LIST_FIRST(&zlist)) {
			offset = 1;
			tmpbuf[0] = '\n';
		}
		freecnt = curzone->zfreecnt;
		for (n = 0; n < ncpus; ++n)
			freecnt += curzone->zfreecnt_pcpu[n];

		ksnprintf(tmpbuf + offset, sizeof(tmpbuf) - offset,
			"%s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
			tmpname, curzone->zsize, curzone->zmax,
			(curzone->ztotal - freecnt),
			freecnt, curzone->znalloc);

		len = strlen((char *)tmpbuf);
		if (LIST_NEXT(curzone, zlink) == NULL)
			tmpbuf[len - 1] = 0;

		error = SYSCTL_OUT(req, tmpbuf, len);

		if (error)
			break;
	}
	lwkt_reltoken(&vm_token);
	return (error);
}

#if defined(INVARIANTS)

/*
 * Debugging only.
 */
void
zerror(int error)
{
	char *msg;

	switch (error) {
	case ZONE_ERROR_INVALID:
		msg = "zone: invalid zone";
		break;
	case ZONE_ERROR_NOTFREE:
		msg = "zone: entry not free";
		break;
	case ZONE_ERROR_ALREADYFREE:
		msg = "zone: freeing free entry";
		break;
	default:
		msg = "zone: invalid error";
		break;
	}
	panic("%s", msg);
}
#endif

SYSCTL_OID(_vm, OID_AUTO, zone, CTLTYPE_STRING|CTLFLAG_RD,
	NULL, 0, sysctl_vm_zone, "A", "Zone Info");

SYSCTL_INT(_vm, OID_AUTO, zone_kmem_pages,
	CTLFLAG_RD, &zone_kmem_pages, 0,
	"Number of interrupt safe pages allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_burst,
	CTLFLAG_RW, &zone_burst, 0,
	"Burst from depot to pcpu cache");
SYSCTL_LONG(_vm, OID_AUTO, zone_kmem_kvaspace,
	CTLFLAG_RD, &zone_kmem_kvaspace, 0,
	"KVA space allocated by zone");
SYSCTL_INT(_vm, OID_AUTO, zone_kern_pages,
	CTLFLAG_RD, &zone_kern_pages, 0,
	"Number of non-interrupt safe pages allocated by zone");